Move from glog to klog

- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we call it to register klog's flags where they are needed (see the sketch after this list)
- Update the other vendored repositories that made the same glog-to-klog change:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Remove all remaining references to glog
- Fix some tests by calling InitFlags explicitly in their init() methods
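
As a sketch of that last point: klog, unlike glog, does not register its flags as an import side effect, so code that wants -v, -logtostderr, and friends must call InitFlags itself. A minimal, hypothetical test wiring (the package name and verbosity value are illustrative, not from this commit):

package cm_test // hypothetical package name

import (
	"flag"

	"k8s.io/klog"
)

func init() {
	// Register klog's flags on the global FlagSet; passing nil
	// means flag.CommandLine, which the test binary parses later.
	klog.InitFlags(nil)
	// Illustrative: raise verbosity so klog.V(2)/V(4) call sites emit.
	_ = flag.Set("v", "4")
}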

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
Davanum Srinivas
2018-11-09 13:49:10 -05:00
parent 97baad34a7
commit 954996e231
1263 changed files with 10023 additions and 10076 deletions


@@ -22,7 +22,7 @@ import (
 	"sync"
 	"time"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -138,7 +138,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
 	go wait.Until(func() {
 		err := m.UpdateCgroups()
 		if err != nil {
-			glog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err)
+			klog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err)
 		}
 	}, periodicQOSCgroupUpdateInterval, wait.NeverStop)
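
For context on the hunk above: wait.Until from k8s.io/apimachinery re-runs a function on a fixed period until its stop channel closes, and wait.NeverStop is a channel that is never closed. A standalone sketch of the same pattern (the period, message, and sleep are illustrative, not from the commit):

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	// Re-run the closure once per second until the stop channel
	// closes; with wait.NeverStop that is only at process exit.
	go wait.Until(func() {
		klog.V(4).Infof("periodic tick")
	}, time.Second, wait.NeverStop)

	time.Sleep(5 * time.Second) // keep main alive for the demo
}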
@@ -222,17 +222,17 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C
 	resources := m.getNodeAllocatable()
 	allocatableResource, ok := resources[v1.ResourceMemory]
 	if !ok {
-		glog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. Not setting QOS memory limits.")
+		klog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. Not setting QOS memory limits.")
 		return
 	}
 	allocatable := allocatableResource.Value()
 	if allocatable == 0 {
-		glog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. Not setting QOS memory limits.")
+		klog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. Not setting QOS memory limits.")
 		return
 	}
 	for qos, limits := range qosMemoryRequests {
-		glog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve)
+		klog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve)
 	}
 	// Calculate QOS memory limits
@@ -252,7 +252,7 @@ func (m *qosContainerManagerImpl) retrySetMemoryReserve(configs map[v1.PodQOSCla
 	for qos, config := range configs {
 		stats, err := m.cgroupManager.GetResourceStats(config.Name)
 		if err != nil {
-			glog.V(2).Infof("[Container Manager] %v", err)
+			klog.V(2).Infof("[Container Manager] %v", err)
 			return
 		}
 		usage := stats.MemoryStats.Usage
@@ -312,7 +312,7 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error {
 		}
 	}
 	if updateSuccess {
-		glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
+		klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
 		return nil
 	}
@@ -330,12 +330,12 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error {
 	for _, config := range qosConfigs {
 		err := m.cgroupManager.Update(config)
 		if err != nil {
-			glog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration")
+			klog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration")
 			return err
 		}
 	}
-	glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
+	klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
 	return nil
 }
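
The call-site hunks above are one-for-one because klog started as a fork of glog and preserves its logging API; only the import path and the flag wiring change. A minimal sketch of the whole migration in one file (messages are illustrative):

package main

import (
	"flag"

	"k8s.io/klog" // was: "github.com/golang/glog"
)

func main() {
	klog.InitFlags(nil) // the one step glog did implicitly in init()
	flag.Parse()
	defer klog.Flush()

	// These call sites are byte-for-byte what they were under glog,
	// modulo the package name.
	klog.V(2).Infof("verbose detail, shown with -v=2 or higher")
	klog.Warningf("recoverable problem: %v", "example")
	klog.Errorf("hard failure")
}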