Move from glog to klog

- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we register its flags wherever necessary
- Update the vendored repositories that made the same glog-to-klog change:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by calling InitFlags() explicitly in their init() functions (see the sketch below)

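A minimal sketch (not part of this commit) of the explicit flag registration
mentioned above; klog.InitFlags(nil), klog.V(...).Infof and klog.Flush() are
the real klog API, while the surrounding main package is illustrative only:

package main

import (
	"flag"

	"k8s.io/klog"
)

func init() {
	// Unlike glog, klog does not add its flags (-v, -logtostderr, ...)
	// to the default FlagSet on import; InitFlags(nil) registers them
	// on flag.CommandLine. Tests do the same from their init().
	klog.InitFlags(nil)
}

func main() {
	flag.Parse()
	klog.V(2).Infof("klog flags are wired up")
	klog.Flush()
}
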
Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
Davanum Srinivas
2018-11-09 13:49:10 -05:00
parent 97baad34a7
commit 954996e231
1263 changed files with 10023 additions and 10076 deletions

@@ -23,7 +23,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -149,7 +149,7 @@ func NewDaemonSetsController(
failedPodsBackoff *flowcontrol.Backoff,
) (*DaemonSetsController, error) {
eventBroadcaster := record.NewBroadcaster()
-eventBroadcaster.StartLogging(glog.Infof)
+eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -176,13 +176,13 @@ func NewDaemonSetsController(
daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ds := obj.(*apps.DaemonSet)
glog.V(4).Infof("Adding daemon set %s", ds.Name)
klog.V(4).Infof("Adding daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
},
UpdateFunc: func(old, cur interface{}) {
oldDS := old.(*apps.DaemonSet)
curDS := cur.(*apps.DaemonSet)
glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
dsc.enqueueDaemonSet(curDS)
},
DeleteFunc: dsc.deleteDaemonset,
@@ -257,7 +257,7 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
return
}
}
glog.V(4).Infof("Deleting daemon set %s", ds.Name)
klog.V(4).Infof("Deleting daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
}
@@ -266,8 +266,8 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dsc.queue.ShutDown()
glog.Infof("Starting daemon sets controller")
defer glog.Infof("Shutting down daemon sets controller")
klog.Infof("Starting daemon sets controller")
defer klog.Infof("Shutting down daemon sets controller")
if !controller.WaitForCacheSync("daemon sets", stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
return
@@ -363,7 +363,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.Controlle
if len(daemonSets) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("User error! more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v",
klog.V(4).Infof("User error! more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v",
history.Namespace, history.Name, history.Labels)
}
return daemonSets
@@ -386,7 +386,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) {
if ds == nil {
return
}
glog.V(4).Infof("ControllerRevision %s added.", history.Name)
klog.V(4).Infof("ControllerRevision %s added.", history.Name)
return
}
@@ -396,7 +396,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) {
if len(daemonSets) == 0 {
return
}
glog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name)
klog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name)
for _, ds := range daemonSets {
dsc.enqueueDaemonSet(ds)
}
@@ -429,7 +429,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
if ds == nil {
return
}
glog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name)
klog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name)
dsc.enqueueDaemonSet(ds)
return
}
@@ -442,7 +442,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
if len(daemonSets) == 0 {
return
}
glog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name)
klog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name)
for _, ds := range daemonSets {
dsc.enqueueDaemonSet(ds)
}
@@ -481,7 +481,7 @@ func (dsc *DaemonSetsController) deleteHistory(obj interface{}) {
if ds == nil {
return
}
glog.V(4).Infof("ControllerRevision %s deleted.", history.Name)
klog.V(4).Infof("ControllerRevision %s deleted.", history.Name)
dsc.enqueueDaemonSet(ds)
}
@@ -505,7 +505,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
if err != nil {
return
}
glog.V(4).Infof("Pod %s added.", pod.Name)
klog.V(4).Infof("Pod %s added.", pod.Name)
dsc.expectations.CreationObserved(dsKey)
dsc.enqueueDaemonSet(ds)
return
@@ -519,7 +519,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
if len(dss) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s added.", pod.Name)
klog.V(4).Infof("Orphan Pod %s added.", pod.Name)
for _, ds := range dss {
dsc.enqueueDaemonSet(ds)
}
@@ -553,7 +553,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
if ds == nil {
return
}
glog.V(4).Infof("Pod %s updated.", curPod.Name)
klog.V(4).Infof("Pod %s updated.", curPod.Name)
dsc.enqueueDaemonSet(ds)
changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod)
// See https://github.com/kubernetes/kubernetes/pull/38076 for more details
@@ -571,7 +571,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
if len(dss) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s updated.", curPod.Name)
klog.V(4).Infof("Orphan Pod %s updated.", curPod.Name)
labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
if labelChanged || controllerRefChanged {
for _, ds := range dss {
@@ -602,10 +602,10 @@ func (dsc *DaemonSetsController) requeueSuspendedDaemonPods(node string) {
dss := dsc.listSuspendedDaemonPods(node)
for _, dsKey := range dss {
if ns, name, err := cache.SplitMetaNamespaceKey(dsKey); err != nil {
glog.Errorf("Failed to get DaemonSet's namespace and name from %s: %v", dsKey, err)
klog.Errorf("Failed to get DaemonSet's namespace and name from %s: %v", dsKey, err)
continue
} else if ds, err := dsc.dsLister.DaemonSets(ns).Get(name); err != nil {
glog.Errorf("Failed to get DaemonSet %s/%s: %v", ns, name, err)
klog.Errorf("Failed to get DaemonSet %s/%s: %v", ns, name, err)
continue
} else {
dsc.enqueueDaemonSetRateLimited(ds)
@@ -682,7 +682,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
if err != nil {
return
}
glog.V(4).Infof("Pod %s deleted.", pod.Name)
klog.V(4).Infof("Pod %s deleted.", pod.Name)
dsc.expectations.DeletionObserved(dsKey)
dsc.enqueueDaemonSet(ds)
}
@@ -691,7 +691,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
dsList, err := dsc.dsLister.List(labels.Everything())
if err != nil {
glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
klog.V(4).Infof("Error enqueueing daemon sets: %v", err)
return
}
node := obj.(*v1.Node)
@@ -753,7 +753,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
dsList, err := dsc.dsLister.List(labels.Everything())
if err != nil {
glog.V(4).Infof("Error listing daemon sets: %v", err)
klog.V(4).Infof("Error listing daemon sets: %v", err)
return
}
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
@@ -820,7 +820,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[s
for _, pod := range claimedPods {
nodeName, err := util.GetTargetNodeName(pod)
if err != nil {
glog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
klog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
pod.Namespace, pod.Name, ds.Namespace, ds.Name)
continue
}
@@ -899,7 +899,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
inBackoff := dsc.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, now)
if inBackoff {
delay := dsc.failedPodsBackoff.Get(backoffKey)
glog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining",
klog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining",
pod.Namespace, pod.Name, node.Name, delay)
dsc.enqueueDaemonSetAfter(ds, delay)
continue
@@ -908,7 +908,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
dsc.failedPodsBackoff.Next(backoffKey, now)
msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
-glog.V(2).Infof(msg)
+klog.V(2).Infof(msg)
// Emit an event so that it's discoverable to users.
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
podsToDelete = append(podsToDelete, pod.Name)
@@ -1003,7 +1003,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
// error channel to communicate back failures. make the buffer big enough to avoid any blocking
errCh := make(chan error, createDiff+deleteDiff)
glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
klog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
createWait := sync.WaitGroup{}
// If the returned error is not nil we have a parse error.
// The controller handles this via the hash.
@@ -1057,7 +1057,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
return
}
if err != nil {
glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
klog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.CreationObserved(dsKey)
errCh <- err
utilruntime.HandleError(err)
@@ -1068,7 +1068,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
// any skipped pods that we never attempted to start shouldn't be expected.
skippedPods := createDiff - batchSize
if errorCount < len(errCh) && skippedPods > 0 {
glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
for i := 0; i < skippedPods; i++ {
dsc.expectations.CreationObserved(dsKey)
}
@@ -1078,14 +1078,14 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
}
}
glog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
klog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
deleteWait := sync.WaitGroup{}
deleteWait.Add(deleteDiff)
for i := 0; i < deleteDiff; i++ {
go func(ix int) {
defer deleteWait.Done()
if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[ix], ds); err != nil {
glog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
klog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.DeletionObserved(dsKey)
errCh <- err
utilruntime.HandleError(err)
@@ -1145,7 +1145,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
}
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string, updateObservedGen bool) error {
glog.V(4).Infof("Updating daemon set status")
klog.V(4).Infof("Updating daemon set status")
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
@@ -1208,7 +1208,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -1217,7 +1217,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
}
ds, err := dsc.dsLister.DaemonSets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(3).Infof("daemon set has been deleted %v", key)
klog.V(3).Infof("daemon set has been deleted %v", key)
dsc.expectations.DeleteExpectations(key)
return nil
}
@@ -1340,7 +1340,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
reasons, nodeInfo, err := dsc.simulate(newPod, node, ds)
if err != nil {
glog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
klog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
return false, false, false, err
}
@@ -1349,7 +1349,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
// into one result, e.g. selectedNode.
var insufficientResourceErr error
for _, r := range reasons {
glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
klog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
switch reason := r.(type) {
case *predicates.InsufficientResourceError:
insufficientResourceErr = reason
@@ -1392,10 +1392,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
case
predicates.ErrPodAffinityNotMatch,
predicates.ErrServiceAffinityViolated:
glog.Warningf("unexpected predicate failure reason: %s", reason.GetReason())
klog.Warningf("unexpected predicate failure reason: %s", reason.GetReason())
return false, false, false, fmt.Errorf("unexpected reason: DaemonSet Predicates should not return reason %s", reason.GetReason())
default:
glog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason())
klog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason())
wantToRun, shouldSchedule, shouldContinueRunning = false, false, false
emitEvent = true
}