Update cadvisor godeps to v0.28.3
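Most of the changes below either demote unconditional glog.Infof calls to verbosity-gated glog.V(n).Infof calls or promote them to warnings, so routine startup chatter only appears when a high enough -v flag is set. As a reminder of how that gating behaves, here is a minimal sketch against the standard github.com/golang/glog API (the flag values are illustrative):

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers its flags (-v, -logtostderr, ...) on the default FlagSet.
	flag.Set("logtostderr", "true")
	flag.Set("v", "2") // illustrative: enable verbosity levels up to 2
	flag.Parse()

	glog.Infof("printed unconditionally at INFO severity")
	glog.Warningf("printed unconditionally at WARNING severity")

	// V(n) only emits when -v >= n: V(2) prints here, V(5) is dropped.
	glog.V(2).Infof("shown with -v=2")
	glog.V(5).Infof("suppressed with -v=2")

	glog.Flush() // flush buffered log output before exit
}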
vendor/github.com/google/cadvisor/manager/container.go (generated, vendored; 8 changes)
@@ -377,8 +377,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
 	// Create cpu load reader.
 	loadReader, err := cpuload.New()
 	if err != nil {
-		// TODO(rjnagal): Promote to warning once we support cpu load inside namespaces.
-		glog.Infof("Could not initialize cpu load reader for %q: %s", ref.Name, err)
+		glog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err)
 	} else {
 		cont.loadReader = loadReader
 	}
@@ -467,7 +466,7 @@ func (c *containerData) housekeeping() {
 		stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples)
 		if err != nil {
 			if c.allowErrorLogging() {
-				glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
+				glog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
 			}
 		} else if len(stats) < numSamples {
 			// Ignore, not enough stats yet.
@@ -483,6 +482,7 @@ func (c *containerData) housekeeping() {
 			instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds())
 			usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
 			usageInHuman := units.HumanSize(float64(usageMemory))
+			// Don't set verbosity since this is already protected by the logUsage flag.
 			glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
 		}
 	}
@@ -504,7 +504,7 @@ func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeepin
 	err := c.updateStats()
 	if err != nil {
 		if c.allowErrorLogging() {
-			glog.Infof("Failed to update stats for container \"%s\": %s", c.info.Name, err)
+			glog.Warning("Failed to update stats for container \"%s\": %s", c.info.Name, err)
 		}
 	}
 	// Log if housekeeping took too long.
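Both housekeeping call sites keep the existing c.allowErrorLogging() guard in front of the promoted warnings. A hypothetical sketch of such a throttle, assuming a simple time-based policy (this is not cadvisor's actual implementation; the type, field names, and interval are invented):

package main

import (
	"sync"
	"time"
)

// errorLogThrottle stands in for the allowErrorLogging guard: it lets at
// most one error log through per interval, so a container whose stats
// update fails on every housekeeping tick does not flood the logs.
type errorLogThrottle struct {
	mu       sync.Mutex
	last     time.Time
	interval time.Duration
}

func (t *errorLogThrottle) allow() bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if time.Since(t.last) >= t.interval {
		t.last = time.Now()
		return true
	}
	return false
}

func main() {
	t := &errorLogThrottle{interval: time.Minute}
	_ = t.allow() // true on the first failure, false for the next minute
}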
vendor/github.com/google/cadvisor/manager/manager.go (generated, vendored; 58 changes)
@@ -148,19 +148,19 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
 	if err != nil {
 		return nil, err
 	}
-	glog.Infof("cAdvisor running in container: %q", selfContainer)
+	glog.V(2).Infof("cAdvisor running in container: %q", selfContainer)

 	var (
 		dockerStatus info.DockerStatus
 		rktPath      string
 	)
 	if tempDockerStatus, err := docker.Status(); err != nil {
-		glog.Warningf("Unable to connect to Docker: %v", err)
+		glog.V(5).Infof("Docker not connected: %v", err)
 	} else {
 		dockerStatus = tempDockerStatus
 	}
 	if tmpRktPath, err := rkt.RktPath(); err != nil {
-		glog.Warningf("unable to connect to Rkt api service: %v", err)
+		glog.V(5).Infof("Rkt not connected: %v", err)
 	} else {
 		rktPath = tmpRktPath
 	}
@@ -171,7 +171,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
 	}
 	crioInfo, err := crioClient.Info()
 	if err != nil {
-		glog.Warningf("unable to connect to CRI-O api service: %v", err)
+		glog.V(5).Infof("CRI-O not connected: %v", err)
 	}

 	context := fs.Context{
@@ -222,13 +222,13 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
 		return nil, err
 	}
 	newManager.machineInfo = *machineInfo
-	glog.Infof("Machine: %+v", newManager.machineInfo)
+	glog.V(1).Infof("Machine: %+v", newManager.machineInfo)

 	versionInfo, err := getVersionInfo()
 	if err != nil {
 		return nil, err
 	}
-	glog.Infof("Version: %+v", *versionInfo)
+	glog.V(1).Infof("Version: %+v", *versionInfo)

 	newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
 	return newManager, nil
@@ -267,12 +267,12 @@ type manager struct {
 func (self *manager) Start() error {
 	err := docker.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Docker container factory registration failed: %v.", err)
+		glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err)
 	}

 	err = rkt.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Registration of the rkt container factory failed: %v", err)
+		glog.V(5).Infof("Registration of the rkt container factory failed: %v", err)
 	} else {
 		watcher, err := rktwatcher.NewRktContainerWatcher()
 		if err != nil {
@@ -283,17 +283,17 @@ func (self *manager) Start() error {

 	err = containerd.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Registration of the containerd container factory failed: %v", err)
+		glog.V(5).Infof("Registration of the containerd container factory failed: %v", err)
 	}

 	err = crio.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Registration of the crio container factory failed: %v", err)
+		glog.V(5).Infof("Registration of the crio container factory failed: %v", err)
 	}

 	err = systemd.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
-		glog.Warningf("Registration of the systemd container factory failed: %v", err)
+		glog.V(5).Infof("Registration of the systemd container factory failed: %v", err)
 	}

 	err = raw.Register(self, self.fsInfo, self.ignoreMetrics)
@@ -326,12 +326,12 @@ func (self *manager) Start() error {
 	if err != nil {
 		return err
 	}
-	glog.Infof("Starting recovery of all containers")
+	glog.V(2).Infof("Starting recovery of all containers")
 	err = self.detectSubcontainers("/")
 	if err != nil {
 		return err
 	}
-	glog.Infof("Recovery completed")
+	glog.V(2).Infof("Recovery completed")

 	// Watch for new container.
 	quitWatcher := make(chan error)
@@ -849,29 +849,25 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
 		if err != nil {
 			return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err)
 		}
-		glog.V(3).Infof("Got config from %q: %q", v, configFile)
+		glog.V(4).Infof("Got config from %q: %q", v, configFile)

 		if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
 			newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
 			if err != nil {
-				glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
-				return err
+				return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
 			}
 			err = cont.collectorManager.RegisterCollector(newCollector)
 			if err != nil {
-				glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
-				return err
+				return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
 			}
 		} else {
 			newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
 			if err != nil {
-				glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
-				return err
+				return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
 			}
 			err = cont.collectorManager.RegisterCollector(newCollector)
 			if err != nil {
-				glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
-				return err
+				return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
 			}
 		}
 	}
@@ -946,11 +942,11 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
 	}
 	devicesCgroupPath, err := handler.GetCgroupPath("devices")
 	if err != nil {
-		glog.Infof("Error getting devices cgroup path: %v", err)
+		glog.Warningf("Error getting devices cgroup path: %v", err)
 	} else {
 		cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
 		if err != nil {
-			glog.Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
+			glog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
 		}
 	}

@@ -959,7 +955,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
 	collectorConfigs := collector.GetCollectorConfigs(labels)
 	err = m.registerCollectors(collectorConfigs, cont)
 	if err != nil {
-		glog.Infof("failed to register collectors for %q: %v", containerName, err)
+		glog.Warningf("Failed to register collectors for %q: %v", containerName, err)
 	}

 	// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
@@ -1179,7 +1175,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
 }

 func (self *manager) watchForNewOoms() error {
-	glog.Infof("Started watching for new ooms in manager")
+	glog.V(2).Infof("Started watching for new ooms in manager")
 	outStream := make(chan *oomparser.OomInstance, 10)
 	oomLog, err := oomparser.New()
 	if err != nil {
@@ -1347,8 +1343,14 @@ func getVersionInfo() (*info.VersionInfo, error) {

 	kernel_version := machine.KernelVersion()
 	container_os := machine.ContainerOsVersion()
-	docker_version := docker.VersionString()
-	docker_api_version := docker.APIVersionString()
+	docker_version, err := docker.VersionString()
+	if err != nil {
+		return nil, err
+	}
+	docker_api_version, err := docker.APIVersionString()
+	if err != nil {
+		return nil, err
+	}

 	return &info.VersionInfo{
 		KernelVersion: kernel_version,
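The registerCollectors hunk above replaces the log-then-return pattern with a single wrapped error: previously the context lived only in the Info log line while the caller got a bare error that typically got logged a second time. Folding the context into fmt.Errorf reports the failure once, at whatever level the caller chooses. A minimal sketch of the two shapes (register is a stub standing in for RegisterCollector):

package collectors

import (
	"errors"
	"fmt"

	"github.com/golang/glog"
)

// register stands in for cont.collectorManager.RegisterCollector.
func register(name string) error { return errors.New("connection refused") }

// Old shape: the context lives only in the log line; the caller receives
// a bare error and tends to report the same failure again.
func registerOld(name string) error {
	if err := register(name); err != nil {
		glog.Infof("failed to register collector for container %q: %v", name, err)
		return err
	}
	return nil
}

// New shape: attach the context to the error once and let the caller
// decide whether, where, and at what level to log it.
func registerNew(name string) error {
	if err := register(name); err != nil {
		return fmt.Errorf("failed to register collector for container %q: %v", name, err)
	}
	return nil
}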
vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go (generated, vendored; 4 changes)
@@ -53,7 +53,7 @@ func (self *rktContainerWatcher) Stop() error {
 }

 func (self *rktContainerWatcher) detectRktContainers(events chan watcher.ContainerEvent) {
-	glog.Infof("starting detectRktContainers thread")
+	glog.V(1).Infof("Starting detectRktContainers thread")
 	ticker := time.Tick(10 * time.Second)
 	curpods := make(map[string]*rktapi.Pod)

@@ -92,7 +92,7 @@ func (self *rktContainerWatcher) syncRunningPods(pods []*rktapi.Pod, events chan
 	for id, pod := range curpods {
 		if _, ok := newpods[id]; !ok {
 			for _, cgroup := range podToCgroup(pod) {
-				glog.Infof("cgroup to delete = %v", cgroup)
+				glog.V(2).Infof("cgroup to delete = %v", cgroup)
 				self.sendDestroyEvent(cgroup, events)
 			}
 		}
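For context on the surrounding code, detectRktContainers polls the rkt API service on a time.Tick channel rather than subscribing to an event stream. An illustrative reduction of that loop under the same 10-second cadence (not the actual watcher; the poll body is a stub):

package main

import (
	"fmt"
	"time"
)

// syncOnce stands in for listing running pods and diffing them against the
// previously seen set to emit create/destroy events.
func syncOnce() {
	fmt.Println("polling rkt API service")
}

func main() {
	// time.Tick never releases its underlying ticker, which is acceptable
	// for a loop that runs for the life of the process, as this one does.
	ticker := time.Tick(10 * time.Second)
	for range ticker {
		syncOnce()
	}
}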