vendor: cadvisor v0.39.0
Main upgrades:
- github.com/opencontainers/runc v1.0.0-rc93
- github.com/containerd/containerd v1.4.4
- github.com/docker/docker v20.10.2
- github.com/mrunalp/fileutils v0.5.0
- github.com/opencontainers/selinux v1.8.0
- github.com/cilium/ebpf v0.2.0
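For orientation, a minimal sketch of how these pins might appear in a go.mod require block. Only the module paths and versions come from the list above; the module name and go directive are placeholders for this sketch.

module example.com/vendor-bump // hypothetical module name, for illustration only

go 1.16

require (
	github.com/cilium/ebpf v0.2.0
	github.com/containerd/containerd v1.4.4
	github.com/docker/docker v20.10.2+incompatible // +incompatible is how modules record docker's non-module tags
	github.com/google/cadvisor v0.39.0
	github.com/mrunalp/fileutils v0.5.0
	github.com/opencontainers/runc v1.0.0-rc93
	github.com/opencontainers/selinux v1.8.0
)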
Changed file: vendor/github.com/google/cadvisor/container/libcontainer/handler.go (33 changed lines; generated, vendored)
@@ -70,21 +70,22 @@ func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, includedM
 
 // Get cgroup and networking stats of the specified container
 func (h *Handler) GetStats() (*info.ContainerStats, error) {
-	var cgroupStats *cgroups.Stats
-	readCgroupStats := true
+	ignoreStatsError := false
 	if cgroups.IsCgroup2UnifiedMode() {
-		// On cgroup v2 there are no stats at the root cgroup
-		// so check whether it is the root cgroup
+		// On cgroup v2 the root cgroup stats have been introduced in recent kernel versions,
+		// so not all kernel versions have all the data. This means that stat fetching can fail
+		// due to lacking cgroup stat files, but that some data is provided.
 		if h.cgroupManager.Path("") == fs2.UnifiedMountpoint {
-			readCgroupStats = false
+			ignoreStatsError = true
 		}
 	}
-	var err error
-	if readCgroupStats {
-		cgroupStats, err = h.cgroupManager.GetStats()
-		if err != nil {
+
+	cgroupStats, err := h.cgroupManager.GetStats()
+	if err != nil {
+		if !ignoreStatsError {
 			return nil, err
 		}
+		klog.V(4).Infof("Ignoring errors when gathering stats for root cgroup since some controllers don't have stats on the root cgroup: %v", err)
 	}
 	libcontainerStats := &libcontainer.Stats{
 		CgroupStats: cgroupStats,
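For readers outside the cadvisor tree, a self-contained sketch of the pattern the new GetStats follows: a stats error stays fatal everywhere except the cgroup v2 root, where missing stat files are expected on older kernels. The unified-mode probe and the getStats stub below are assumptions standing in for runc's cgroups helpers, not the vendored code.

package main

import (
	"fmt"
	"os"
)

// isCgroup2UnifiedMode uses the common detection trick: on a pure cgroup v2
// host, /sys/fs/cgroup itself exposes cgroup.controllers.
func isCgroup2UnifiedMode() bool {
	_, err := os.Stat("/sys/fs/cgroup/cgroup.controllers")
	return err == nil
}

// getStats is a stand-in for cgroupManager.GetStats(): it just tries to read
// one stat file for the given cgroup directory.
func getStats(cgroupPath string) (map[string]string, error) {
	data, err := os.ReadFile(cgroupPath + "/cpu.stat")
	if err != nil {
		return nil, err
	}
	return map[string]string{"cpu.stat": string(data)}, nil
}

func collect(cgroupPath string) (map[string]string, error) {
	// Only the root cgroup on cgroup v2 gets the lenient treatment: some
	// controllers simply have no stat files there on older kernels.
	ignoreStatsError := isCgroup2UnifiedMode() && cgroupPath == "/sys/fs/cgroup"

	stats, err := getStats(cgroupPath)
	if err != nil {
		if !ignoreStatsError {
			return nil, err
		}
		fmt.Fprintf(os.Stderr, "ignoring stats error for root cgroup: %v\n", err)
	}
	return stats, nil
}

func main() {
	stats, err := collect("/sys/fs/cgroup")
	fmt.Printf("collected %d entries, err: %v\n", len(stats), err)
}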
@@ -793,7 +794,12 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
 	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
 
-	if s.MemoryStats.UseHierarchy {
+	if cgroups.IsCgroup2UnifiedMode() {
+		ret.Memory.Cache = s.MemoryStats.Stats["file"]
+		ret.Memory.RSS = s.MemoryStats.Stats["anon"]
+		ret.Memory.Swap = s.MemoryStats.SwapUsage.Usage
+		ret.Memory.MappedFile = s.MemoryStats.Stats["file_mapped"]
+	} else if s.MemoryStats.UseHierarchy {
 		ret.Memory.Cache = s.MemoryStats.Stats["total_cache"]
 		ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
 		ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
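The branch above is essentially a key rename between hierarchies: cgroup v2 memory.stat exposes "file" and "anon", while cgroup v1 hierarchical accounting exposes "total_cache" and "total_rss". A small illustrative lookup, assuming the stats map mirrors s.MemoryStats.Stats; the helper name is hypothetical.

package main

import "fmt"

// memCacheRSS picks cache and RSS out of a memory.stat-style map, using the
// cgroup v2 key names on the unified hierarchy and the v1 hierarchical totals
// otherwise. The unified flag stands in for cgroups.IsCgroup2UnifiedMode().
func memCacheRSS(stats map[string]uint64, unified bool) (cache, rss uint64) {
	if unified {
		// cgroup v2 memory.stat: page cache is "file", anonymous memory is "anon".
		return stats["file"], stats["anon"]
	}
	// cgroup v1 hierarchical totals.
	return stats["total_cache"], stats["total_rss"]
}

func main() {
	v2 := map[string]uint64{"file": 4096, "anon": 8192}
	cache, rss := memCacheRSS(v2, true)
	fmt.Println(cache, rss) // 4096 8192
}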
@@ -829,6 +835,10 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.WorkingSet = workingSet
 }
 
+func setCPUSetStats(s *cgroups.Stats, ret *info.ContainerStats) {
+	ret.CpuSet.MemoryMigrate = s.CPUSetStats.MemoryMigrate
+}
+
 func getNumaStats(memoryStats map[uint8]uint64) map[uint8]uint64 {
 	stats := make(map[uint8]uint64, len(memoryStats))
 	for node, usage := range memoryStats {
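The new setCPUSetStats only copies MemoryMigrate through from runc's stats; on cgroup v1 that flag originates in the cpuset controller's cpuset.memory_migrate file. A standalone sketch of reading it directly; the controller path and helper name are assumptions for illustration, and this is not how cadvisor itself collects the value.

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readMemoryMigrate reads the cpuset.memory_migrate flag (0 or 1) for a
// cgroup v1 cpuset cgroup directory.
func readMemoryMigrate(cgroupPath string) (uint64, error) {
	data, err := os.ReadFile(cgroupPath + "/cpuset.memory_migrate")
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

func main() {
	v, err := readMemoryMigrate("/sys/fs/cgroup/cpuset")
	if err != nil {
		fmt.Println("cpuset controller not available:", err)
		return
	}
	fmt.Println("memory_migrate:", v)
}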
@@ -906,6 +916,9 @@ func newContainerStats(libcontainerStats *libcontainer.Stats, includedMetrics co
 		if includedMetrics.Has(container.HugetlbUsageMetrics) {
 			setHugepageStats(s, ret)
 		}
+		if includedMetrics.Has(container.CPUSetMetrics) {
+			setCPUSetStats(s, ret)
+		}
 	}
 	if len(libcontainerStats.Interfaces) > 0 {
 		setNetworkStats(libcontainerStats, ret)
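The added block gates CPUSet collection on includedMetrics.Has(container.CPUSetMetrics), the same opt-in used for the hugetlb stats above. A simplified stand-in for that metric-set gating, with hypothetical type and constant names rather than cadvisor's own container.MetricSet.

package main

import "fmt"

// MetricKind and MetricSet are simplified stand-ins for cadvisor's metric-set
// gating: only kinds present in the set get collected.
type MetricKind string

type MetricSet map[MetricKind]struct{}

func (s MetricSet) Has(k MetricKind) bool {
	_, ok := s[k]
	return ok
}

const (
	HugetlbUsageMetrics MetricKind = "hugetlb"
	CPUSetMetrics       MetricKind = "cpuset"
)

func main() {
	included := MetricSet{CPUSetMetrics: {}}
	for _, k := range []MetricKind{HugetlbUsageMetrics, CPUSetMetrics} {
		if included.Has(k) {
			fmt.Println("collecting", k)
		} else {
			fmt.Println("skipping", k)
		}
	}
}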