Move from glog to klog

- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we register its flags wherever necessary (see the first sketch below)
- Update the other vendored repositories that made the same glog-to-klog change:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by calling InitFlags() explicitly in their init() functions (see the second sketch below)
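A minimal sketch of the flag wiring this implies, assuming a generic standalone binary rather than any particular component touched here: glog registered its flags as an import side effect, while klog only registers them once InitFlags() is called, so the call has to happen before flag.Parse().

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog does not register -v, -logtostderr, etc. as an import side effect
	// the way glog did; the flags exist only after an explicit InitFlags call.
	klog.InitFlags(nil) // nil means "register on flag.CommandLine"
	flag.Parse()
	defer klog.Flush()

	klog.V(4).Infof("verbose startup detail")
	klog.Info("component started")
}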

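A hedged sketch of the test-side fix; the package and test names below are illustrative, not taken from this commit. Tests that set logging flags (for example via flag.Set("v", ...)) need klog's flags registered in init(), otherwise the flag lookup fails.

package deployment

import (
	"flag"
	"testing"

	"k8s.io/klog"
)

func init() {
	// Register klog's flags so tests can tune verbosity via flag.Set.
	klog.InitFlags(nil)
}

func TestVerbosityFlagRegistered(t *testing.T) {
	if err := flag.Set("v", "4"); err != nil {
		t.Fatalf("expected klog to have registered the -v flag: %v", err)
	}
}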
Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
Author: Davanum Srinivas
Date: 2018-11-09 13:49:10 -05:00
parent 97baad34a7
commit 954996e231
1263 changed files with 10023 additions and 10076 deletions

View File

@@ -42,7 +42,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@@ -25,7 +25,7 @@ import (
"reflect"
"time"
"github.com/golang/glog"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -99,7 +99,7 @@ type DeploymentController struct {
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
eventBroadcaster := record.NewBroadcaster()
-eventBroadcaster.StartLogging(glog.Infof)
+eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -149,8 +149,8 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()
glog.Infof("Starting deployment controller")
defer glog.Infof("Shutting down deployment controller")
klog.Infof("Starting deployment controller")
defer klog.Infof("Shutting down deployment controller")
if !controller.WaitForCacheSync("deployment", stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
return
@@ -165,14 +165,14 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
func (dc *DeploymentController) addDeployment(obj interface{}) {
d := obj.(*apps.Deployment)
glog.V(4).Infof("Adding deployment %s", d.Name)
klog.V(4).Infof("Adding deployment %s", d.Name)
dc.enqueueDeployment(d)
}
func (dc *DeploymentController) updateDeployment(old, cur interface{}) {
oldD := old.(*apps.Deployment)
curD := cur.(*apps.Deployment)
glog.V(4).Infof("Updating deployment %s", oldD.Name)
klog.V(4).Infof("Updating deployment %s", oldD.Name)
dc.enqueueDeployment(curD)
}
@@ -190,7 +190,7 @@ func (dc *DeploymentController) deleteDeployment(obj interface{}) {
return
}
}
glog.V(4).Infof("Deleting deployment %s", d.Name)
klog.V(4).Infof("Deleting deployment %s", d.Name)
dc.enqueueDeployment(d)
}
@@ -211,7 +211,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s added.", rs.Name)
klog.V(4).Infof("ReplicaSet %s added.", rs.Name)
dc.enqueueDeployment(d)
return
}
@@ -222,7 +222,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
if len(ds) == 0 {
return
}
glog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
klog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
for _, d := range ds {
dc.enqueueDeployment(d)
}
@@ -242,7 +242,7 @@ func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet)
if len(deployments) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
klog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
rs.Namespace, rs.Name, rs.Labels, deployments[0].Namespace, deployments[0].Name)
}
return deployments
@@ -277,7 +277,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
klog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
dc.enqueueDeployment(d)
return
}
@@ -290,7 +290,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
if len(ds) == 0 {
return
}
glog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
klog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
for _, d := range ds {
dc.enqueueDeployment(d)
}
@@ -329,7 +329,7 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
klog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
dc.enqueueDeployment(d)
}
@@ -353,7 +353,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
return
}
}
glog.V(4).Infof("Pod %s deleted.", pod.Name)
klog.V(4).Infof("Pod %s deleted.", pod.Name)
if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
// Sync if this Deployment now has no more Pods.
rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1()))
@@ -421,7 +421,7 @@ func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deploymen
}
rs, err = dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
if err != nil || rs.UID != controllerRef.UID {
glog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
klog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
return nil
}
@@ -481,13 +481,13 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
}
if dc.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing deployment %v: %v", key, err)
klog.V(2).Infof("Error syncing deployment %v: %v", key, err)
dc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
klog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
dc.queue.Forget(key)
}
@@ -559,9 +559,9 @@ func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsLis
// This function is not meant to be invoked concurrently with the same key.
func (dc *DeploymentController) syncDeployment(key string) error {
startTime := time.Now()
glog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
klog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -570,7 +570,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
}
deployment, err := dc.dLister.Deployments(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(2).Infof("Deployment %v has been deleted", key)
klog.V(2).Infof("Deployment %v has been deleted", key)
return nil
}
if err != nil {

View File

@@ -21,7 +21,7 @@ import (
"reflect"
"time"
"github.com/golang/glog"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -186,11 +186,11 @@ func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newSt
// Make it ratelimited so we stay on the safe side, eventually the Deployment should
// transition either to a Complete or to a TimedOut condition.
if after < time.Second {
glog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
klog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
dc.enqueueRateLimited(d)
return time.Duration(0)
}
glog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
klog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
// Add a second to avoid milliseconds skew in AddAfter.
// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
dc.enqueueAfter(d, after+time.Second)

View File

@@ -20,7 +20,7 @@ import (
"fmt"
"strconv"
"github.com/golang/glog"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -49,11 +49,11 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
for _, rs := range allRSs {
v, err := deploymentutil.Revision(rs)
if err != nil {
glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
klog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
continue
}
if v == rollbackTo.Revision {
glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
klog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
// rollback by copying podTemplate.Spec from the replica set
// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
// no-op if the spec matches current deployment's podTemplate.Spec
@@ -75,7 +75,7 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
performedRollback := false
if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
klog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
deploymentutil.SetFromReplicaSetTemplate(d, rs.Spec.Template)
// set RS (the old RS we'll rolling back to) annotations back to the deployment;
// otherwise, the deployment's current annotations (should be the same as current new RS) will be copied to the RS after the rollback.
@@ -91,7 +91,7 @@ func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.
deploymentutil.SetDeploymentAnnotationsTo(d, rs)
performedRollback = true
} else {
glog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
klog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
eventMsg := fmt.Sprintf("The rollback revision contains the same template as current deployment %q", d.Name)
dc.emitRollbackWarningEvent(d, deploymentutil.RollbackTemplateUnchanged, eventMsg)
}
@@ -111,7 +111,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
// It is assumed that the caller will have updated the deployment template appropriately (in case
// we want to rollback).
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
setRollbackTo(d, nil)
_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
return err

View File

@@ -20,9 +20,9 @@ import (
"fmt"
"sort"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/client-go/util/integer"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
@@ -91,7 +91,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
}
allPodsCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
glog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
klog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down. We can scale down in the following 2 cases:
@@ -137,7 +137,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
if err != nil {
return false, nil
}
glog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
klog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
// Scale down old replica sets, need check maxUnavailable to ensure we can scale down
allRSs = append(oldRSs, newRS)
@@ -145,7 +145,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
if err != nil {
return false, nil
}
glog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
klog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
totalScaledDown := cleanupCount + scaledDownCount
return totalScaledDown > 0, nil
@@ -166,7 +166,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaS
// cannot scale down this replica set.
continue
}
glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
klog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
// no unhealthy replicas found, no scaling required.
continue
@@ -200,7 +200,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
// Cannot scale down.
return 0, nil
}
glog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
klog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))

View File

@@ -22,11 +22,11 @@ import (
"sort"
"strconv"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
@@ -248,7 +248,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
// error.
_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
if dErr == nil {
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
return nil, err
case err != nil:
@@ -440,7 +440,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
}
sort.Sort(controller.ReplicaSetsByCreationTimestamp(cleanableRSes))
glog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)
klog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)
for i := int32(0); i < diff; i++ {
rs := cleanableRSes[i]
@@ -448,7 +448,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration || rs.DeletionTimestamp != nil {
continue
}
glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
// Return error instead of aggregating and continuing DELETEs on the theory
// that we may be overloading the api server.

View File

@@ -24,7 +24,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@@ -24,7 +24,7 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -186,7 +186,7 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip the replica sets when it failed to parse their revision information
glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
} else if v > max {
max = v
}
@@ -200,7 +200,7 @@ func LastRevision(allRSs []*apps.ReplicaSet) int64 {
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip the replica sets when it failed to parse their revision information
glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
} else if v >= max {
secMax = max
max = v
@@ -241,7 +241,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
if err != nil {
if oldRevision != "" {
glog.Warningf("Updating replica set revision OldRevision not int %s", err)
klog.Warningf("Updating replica set revision OldRevision not int %s", err)
return false
}
//If the RS annotation is empty then initialise it to 0
@@ -249,13 +249,13 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
}
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
if err != nil {
glog.Warningf("Updating replica set revision NewRevision not int %s", err)
klog.Warningf("Updating replica set revision NewRevision not int %s", err)
return false
}
if oldRevisionInt < newRevisionInt {
newRS.Annotations[RevisionAnnotation] = newRevision
annotationChanged = true
glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
klog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
}
// If a revision annotation already existed and this replica set was updated with a new revision
// then that means we are rolling back to this replica set. We need to preserve the old revisions
@@ -376,7 +376,7 @@ func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, boo
}
intValue, err := strconv.Atoi(annotationValue)
if err != nil {
glog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
klog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
return int32(0), false
}
return int32(intValue), true
@@ -787,7 +787,7 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS
delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
timedOut := from.Add(delta).Before(now)
glog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
klog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
return timedOut
}