Bump cAdvisor to v0.43.0
Bumping cAdvisor from v0.39.2 -> v0.43.0

Also pin transitive dependencies:
* containerd v1.4.9 -> v1.4.11
* docker v20.10.2+incompatible -> v20.10.7+incompatible

Signed-off-by: David Porter <david@porter.me>
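For illustration only (not part of this commit), pinning these versions amounts to go.mod entries like the fragment below before re-vendoring; in the Kubernetes repository this is typically driven by hack/pin-dependency.sh and hack/update-vendor.sh rather than edited by hand.

// Hypothetical go.mod fragment showing the intended pins; the actual commit
// updates Kubernetes' go.mod and vendor/ tree via the repo's vendoring scripts.
require (
    github.com/google/cadvisor v0.43.0
    github.com/containerd/containerd v1.4.11
    github.com/docker/docker v20.10.7+incompatible
)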
vendor/github.com/google/cadvisor/resctrl/collector.go (generated, vendored): 181 changed lines
@@ -1,6 +1,7 @@
+//go:build linux
 // +build linux
 
-// Copyright 2020 Google Inc. All Rights Reserved.
+// Copyright 2021 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -18,57 +19,153 @@
 package resctrl
 
 import (
-    info "github.com/google/cadvisor/info/v1"
-    "github.com/google/cadvisor/stats"
+    "fmt"
+    "os"
+    "path/filepath"
+    "strings"
+    "sync"
+    "time"
 
-    "github.com/opencontainers/runc/libcontainer/configs"
-    "github.com/opencontainers/runc/libcontainer/intelrdt"
+    "k8s.io/klog/v2"
+
+    info "github.com/google/cadvisor/info/v1"
 )
 
+const noInterval = 0
+
 type collector struct {
-    resctrl intelrdt.Manager
-    stats.NoopDestroy
+    id                string
+    interval          time.Duration
+    getContainerPids  func() ([]string, error)
+    resctrlPath       string
+    running           bool
+    destroyed         bool
+    numberOfNUMANodes int
+    vendorID          string
+    mu                sync.Mutex
+    inHostNamespace   bool
 }
 
-func newCollector(id string, resctrlPath string) *collector {
-    collector := &collector{
-        resctrl: intelrdt.NewManager(
-            &configs.Config{
-                IntelRdt: &configs.IntelRdt{},
-            },
-            id,
-            resctrlPath,
-        ),
-    }
-
-    return collector
+func newCollector(id string, getContainerPids func() ([]string, error), interval time.Duration, numberOfNUMANodes int, vendorID string, inHostNamespace bool) *collector {
+    return &collector{id: id, interval: interval, getContainerPids: getContainerPids, numberOfNUMANodes: numberOfNUMANodes,
+        vendorID: vendorID, mu: sync.Mutex{}, inHostNamespace: inHostNamespace}
 }
 
-func (c *collector) UpdateStats(stats *info.ContainerStats) error {
-    stats.Resctrl = info.ResctrlStats{}
+func (c *collector) setup() error {
+    var err error
+    c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
 
-    resctrlStats, err := c.resctrl.GetStats()
-    if err != nil {
-        return err
-    }
-
-    numberOfNUMANodes := len(*resctrlStats.MBMStats)
-
-    stats.Resctrl.MemoryBandwidth = make([]info.MemoryBandwidthStats, 0, numberOfNUMANodes)
-    stats.Resctrl.Cache = make([]info.CacheStats, 0, numberOfNUMANodes)
-
-    for _, numaNodeStats := range *resctrlStats.MBMStats {
-        stats.Resctrl.MemoryBandwidth = append(stats.Resctrl.MemoryBandwidth,
-            info.MemoryBandwidthStats{
-                TotalBytes: numaNodeStats.MBMTotalBytes,
-                LocalBytes: numaNodeStats.MBMLocalBytes,
-            })
-    }
-
-    for _, numaNodeStats := range *resctrlStats.CMTStats {
-        stats.Resctrl.Cache = append(stats.Resctrl.Cache,
-            info.CacheStats{LLCOccupancy: numaNodeStats.LLCOccupancy})
+    if c.interval != noInterval {
+        if err != nil {
+            klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
+        } else {
+            c.running = true
+        }
+        go func() {
+            for {
+                time.Sleep(c.interval)
+                c.mu.Lock()
+                if c.destroyed {
+                    break
+                }
+                klog.V(5).Infof("Trying to check %q containers control group.", c.id)
+                if c.running {
+                    err = c.checkMonitoringGroup()
+                    if err != nil {
+                        c.running = false
+                        klog.Errorf("Failed to check %q resctrl collector control group: %s \n Trying again in next intervals.", c.id, err)
+                    }
+                } else {
+                    c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
+                    if err != nil {
+                        c.running = false
+                        klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
+                    }
+                }
+                c.mu.Unlock()
+            }
+        }()
+    } else {
+        // There is no interval set, if setup fail, stop.
+        if err != nil {
+            return fmt.Errorf("failed to setup container %q resctrl collector: %w", c.id, err)
+        }
+        c.running = true
     }
 
     return nil
 }
+
+func (c *collector) checkMonitoringGroup() error {
+    newPath, err := prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
+    if err != nil {
+        return fmt.Errorf("couldn't obtain mon_group path: %v", err)
+    }
+
+    // Check if container moved between control groups.
+    if newPath != c.resctrlPath {
+        err = c.clear()
+        if err != nil {
+            return fmt.Errorf("couldn't clear previous monitoring group: %w", err)
+        }
+        c.resctrlPath = newPath
+    }
+
+    return nil
+}
+
+func (c *collector) UpdateStats(stats *info.ContainerStats) error {
+    c.mu.Lock()
+    defer c.mu.Unlock()
+    if c.running {
+        stats.Resctrl = info.ResctrlStats{}
+
+        resctrlStats, err := getIntelRDTStatsFrom(c.resctrlPath, c.vendorID)
+        if err != nil {
+            return err
+        }
+
+        stats.Resctrl.MemoryBandwidth = make([]info.MemoryBandwidthStats, 0, c.numberOfNUMANodes)
+        stats.Resctrl.Cache = make([]info.CacheStats, 0, c.numberOfNUMANodes)
+
+        for _, numaNodeStats := range *resctrlStats.MBMStats {
+            stats.Resctrl.MemoryBandwidth = append(stats.Resctrl.MemoryBandwidth,
+                info.MemoryBandwidthStats{
+                    TotalBytes: numaNodeStats.MBMTotalBytes,
+                    LocalBytes: numaNodeStats.MBMLocalBytes,
+                })
+        }
+
+        for _, numaNodeStats := range *resctrlStats.CMTStats {
+            stats.Resctrl.Cache = append(stats.Resctrl.Cache,
+                info.CacheStats{LLCOccupancy: numaNodeStats.LLCOccupancy})
+        }
+    }
+
+    return nil
+}
+
+func (c *collector) Destroy() {
+    c.mu.Lock()
+    defer c.mu.Unlock()
+    c.running = false
+    err := c.clear()
+    if err != nil {
+        klog.Errorf("trying to destroy %q resctrl collector but: %v", c.id, err)
+    }
+    c.destroyed = true
+}
+
+func (c *collector) clear() error {
+    // Not allowed to remove root or undefined resctrl directory.
+    if c.id != rootContainer && c.resctrlPath != "" {
+        // Remove only own prepared mon group.
+        if strings.HasPrefix(filepath.Base(c.resctrlPath), monGroupPrefix) {
+            err := os.RemoveAll(c.resctrlPath)
+            if err != nil {
+                return fmt.Errorf("couldn't clear mon_group: %v", err)
+            }
+        }
+    }
+    return nil
+}