deps: Bump to cAdvisor v0.47.1

Signed-off-by: David Porter <david@porter.me>

Author: David Porter
Date:   2023-01-11 16:05:25 -08:00
Parent: c9ed04762f
Commit: 761dd3640e
37 changed files with 144 additions and 7198 deletions
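
The diff below only covers the vendored cAdvisor sources; the module pin change itself is not shown in this excerpt. As a rough sketch (the previous version number is assumed, not taken from this commit), the go.mod side of the bump would look like:

	require github.com/google/cadvisor v0.47.1 // previously an older pin, e.g. v0.46.x (assumed)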

@@ -40,6 +40,7 @@ import (
"github.com/google/cadvisor/utils/cpuload"
"github.com/docker/go-units"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
@@ -96,9 +97,6 @@ type containerData struct {
// Runs custom metric collectors.
collectorManager collector.CollectorManager
// nvidiaCollector updates stats for Nvidia GPUs attached to the container.
nvidiaCollector stats.Collector
// perfCollector updates stats for perf_event cgroup controller.
perfCollector stats.Collector
@@ -448,7 +446,6 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
onDemandChan: make(chan chan struct{}, 100),
clock: clock,
perfCollector: &stats.NoopCollector{},
nvidiaCollector: &stats.NoopCollector{},
resctrlCollector: &stats.NoopCollector{},
}
cont.info.ContainerReference = ref
@@ -688,12 +685,6 @@ func (cd *containerData) updateStats() error {
}
}
var nvidiaStatsErr error
if cd.nvidiaCollector != nil {
// This updates the Accelerators field of the stats struct
nvidiaStatsErr = cd.nvidiaCollector.UpdateStats(stats)
}
perfStatsErr := cd.perfCollector.UpdateStats(stats)
resctrlStatsErr := cd.resctrlCollector.UpdateStats(stats)
@@ -718,10 +709,6 @@ func (cd *containerData) updateStats() error {
if statsErr != nil {
return statsErr
}
if nvidiaStatsErr != nil {
klog.Errorf("error occurred while collecting nvidia stats for container %s: %s", cInfo.Name, err)
return nvidiaStatsErr
}
if perfStatsErr != nil {
klog.Errorf("error occurred while collecting perf stats for container %s: %s", cInfo.Name, err)
return perfStatsErr

@@ -27,7 +27,6 @@ import (
"sync/atomic"
"time"
"github.com/google/cadvisor/accelerators"
"github.com/google/cadvisor/cache/memory"
"github.com/google/cadvisor/collector"
"github.com/google/cadvisor/container"
@@ -199,7 +198,6 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig
containerWatchers: []watcher.ContainerWatcher{},
eventsChannel: eventsChannel,
collectorHTTPClient: collectorHTTPClient,
nvidiaManager: accelerators.NewNvidiaManager(includedMetricsSet),
rawContainerCgroupPathPrefixWhiteList: rawContainerCgroupPathPrefixWhiteList,
containerEnvMetadataWhiteList: containerEnvMetadataWhiteList,
}
@@ -259,7 +257,6 @@ type manager struct {
containerWatchers []watcher.ContainerWatcher
eventsChannel chan watcher.ContainerEvent
collectorHTTPClient *http.Client
nvidiaManager stats.Manager
perfManager stats.Manager
resctrlManager resctrl.Manager
// List of raw container cgroup path prefix whitelist.
@@ -327,7 +324,6 @@ func (m *manager) Start() error {
}
func (m *manager) Stop() error {
defer m.nvidiaManager.Destroy()
defer m.destroyCollectors()
// Stop and wait on all quit channels.
for i, c := range m.quitChannels {
@@ -934,17 +930,6 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
return err
}
if !cgroups.IsCgroup2UnifiedMode() {
devicesCgroupPath, err := handler.GetCgroupPath("devices")
if err != nil {
klog.Warningf("Error getting devices cgroup path: %v", err)
} else {
cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
if err != nil {
klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %s: %s", cont.info.Name, err)
}
}
}
if m.includedMetrics.Has(container.PerfMetrics) {
perfCgroupPath, err := handler.GetCgroupPath("perf_event")
if err != nil {