Update cAdvisor to 2ed7198
* Add container_cpu_cfs_* metrics (CPU throttling due to limits)
* Add container_memory_swap metric
* Ensure minimum kernel version for thin_ls
Diff: c6c06d4...2ed7198
vendor/github.com/google/cadvisor/container/common/helpers.go (generated, vendored): 1 change
@@ -110,6 +110,7 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 			spec.HasMemory = true
 			spec.Memory.Limit = readUInt64(memoryRoot, "memory.limit_in_bytes")
+			spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.memsw.limit_in_bytes")
 			spec.Memory.Reservation = readUInt64(memoryRoot, "memory.soft_limit_in_bytes")
 		}
 	}
 
vendor/github.com/google/cadvisor/container/docker/factory.go (generated, vendored): 67 changes
@@ -19,15 +19,18 @@ import (
 	"fmt"
 	"path"
 	"regexp"
+	"strconv"
 	"strings"
 	"sync"
 
+	"github.com/blang/semver"
 	dockertypes "github.com/docker/engine-api/types"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/libcontainer"
 	"github.com/google/cadvisor/devicemapper"
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
+	"github.com/google/cadvisor/machine"
 	"github.com/google/cadvisor/manager/watcher"
 	dockerutil "github.com/google/cadvisor/utils/docker"
 
@@ -178,6 +181,10 @@ func startThinPoolWatcher(dockerInfo *dockertypes.Info) (*devicemapper.ThinPoolW
 		return nil, err
 	}
 
+	if err := ensureThinLsKernelVersion(machine.KernelVersion()); err != nil {
+		return nil, err
+	}
+
 	dockerThinPoolName, err := dockerutil.DockerThinPoolName(*dockerInfo)
 	if err != nil {
 		return nil, err
@@ -197,6 +204,66 @@ func startThinPoolWatcher(dockerInfo *dockertypes.Info) (*devicemapper.ThinPoolW
 	return thinPoolWatcher, nil
 }
 
+func ensureThinLsKernelVersion(kernelVersion string) error {
+	// kernel 4.4.0 has the proper bug fixes to allow thin_ls to work without corrupting the thin pool
+	minKernelVersion := semver.MustParse("4.4.0")
+	// RHEL 7 kernel 3.10.0 release >= 366 has the proper bug fixes backported from 4.4.0 to allow
+	// thin_ls to work without corrupting the thin pool
+	minRhel7KernelVersion := semver.MustParse("3.10.0")
+
+	matches := version_re.FindStringSubmatch(kernelVersion)
+	if len(matches) < 4 {
+		return fmt.Errorf("error parsing kernel version: %q is not a semver", kernelVersion)
+	}
+
+	sem, err := semver.Make(matches[0])
+	if err != nil {
+		return err
+	}
+
+	if sem.GTE(minKernelVersion) {
+		// kernel 4.4+ - good
+		return nil
+	}
+
+	// Certain RHEL/Centos 7.x kernels have a backport to fix the corruption bug
+	if !strings.Contains(kernelVersion, ".el7") {
+		// not a RHEL 7.x kernel - won't work
+		return fmt.Errorf("kernel version 4.4.0 or later is required to use thin_ls - you have %q", kernelVersion)
+	}
+
+	// RHEL/Centos 7.x from here on
+	if sem.Major != 3 {
+		// only 3.x kernels *may* work correctly
+		return fmt.Errorf("RHEL/Centos 7.x kernel version 3.10.0-366 or later is required to use thin_ls - you have %q", kernelVersion)
+	}
+
+	if sem.GT(minRhel7KernelVersion) {
+		// 3.10.1+ - good
+		return nil
+	}
+
+	if sem.EQ(minRhel7KernelVersion) {
+		// need to check release
+		releaseRE := regexp.MustCompile(`^[^-]+-([0-9]+)\.`)
+		releaseMatches := releaseRE.FindStringSubmatch(kernelVersion)
+		if len(releaseMatches) != 2 {
+			return fmt.Errorf("unable to determine RHEL/Centos 7.x kernel release from %q", kernelVersion)
+		}
+
+		release, err := strconv.Atoi(releaseMatches[1])
+		if err != nil {
+			return fmt.Errorf("error parsing release %q: %v", releaseMatches[1], err)
+		}
+
+		if release >= 366 {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("RHEL/Centos 7.x kernel version 3.10.0-366 or later is required to use thin_ls - you have %q", kernelVersion)
+}
+
 // Register root container before running this function!
 func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
 	client, err := Client()
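A hedged sketch, not part of this commit: a table-driven test that could sit alongside factory.go to pin down how the new kernel gate behaves on representative version strings. It assumes the package-local ensureThinLsKernelVersion and version_re shown above.

package docker

import "testing"

func TestEnsureThinLsKernelVersion(t *testing.T) {
	cases := []struct {
		kernelVersion string
		wantErr       bool
	}{
		{"4.4.0-1-amd64", false},         // mainline 4.4+: has the thin_ls fixes
		{"4.9.0-3-amd64", false},         // anything newer also passes
		{"3.10.0-514.el7.x86_64", false}, // RHEL/CentOS 7, release 514 >= 366: backport present
		{"3.10.0-327.el7.x86_64", true},  // RHEL/CentOS 7, release 327 < 366: rejected
		{"3.16.0-4-amd64", true},         // old non-RHEL kernel: rejected
		{"not-a-version", true},          // unparseable: rejected
	}
	for _, c := range cases {
		err := ensureThinLsKernelVersion(c.kernelVersion)
		if gotErr := err != nil; gotErr != c.wantErr {
			t.Errorf("ensureThinLsKernelVersion(%q) error = %v, wantErr %v", c.kernelVersion, err, c.wantErr)
		}
	}
}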
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored): 33 changes
@@ -89,7 +89,7 @@ func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetri
 	libcontainerStats := &libcontainer.Stats{
 		CgroupStats: cgroupStats,
 	}
-	stats := toContainerStats(libcontainerStats)
+	stats := newContainerStats(libcontainerStats)
 
 	// If we know the pid then get network stats from /proc/<pid>/net/dev
 	if pid == 0 {
@@ -350,7 +350,7 @@ func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskSta
 }
 
 // Convert libcontainer stats to info.ContainerStats.
-func toContainerStats0(s *cgroups.Stats, ret *info.ContainerStats) {
+func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
 	ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
 	n := len(s.CpuStats.CpuUsage.PercpuUsage)
@@ -361,9 +361,13 @@ func toContainerStats0(s *cgroups.Stats, ret *info.ContainerStats) {
 		ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
 		ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]
 	}
+
+	ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
+	ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
+	ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime
 }
 
-func toContainerStats1(s *cgroups.Stats, ret *info.ContainerStats) {
+func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
 	ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
 	ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
@@ -374,11 +378,12 @@ func toContainerStats1(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
 }
 
-func toContainerStats2(s *cgroups.Stats, ret *info.ContainerStats) {
+func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.Usage = s.MemoryStats.Usage.Usage
 	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
 	ret.Memory.Cache = s.MemoryStats.Stats["cache"]
 	ret.Memory.RSS = s.MemoryStats.Stats["rss"]
+	ret.Memory.Swap = s.MemoryStats.Stats["swap"]
 	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
 		ret.Memory.ContainerData.Pgfault = v
 		ret.Memory.HierarchicalData.Pgfault = v
@@ -399,7 +404,7 @@ func toContainerStats2(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.WorkingSet = workingSet
 }
 
-func toContainerStats3(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
+func setNetworkStats(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
 	ret.Network.Interfaces = make([]info.InterfaceStats, len(libcontainerStats.Interfaces))
 	for i := range libcontainerStats.Interfaces {
 		ret.Network.Interfaces[i] = info.InterfaceStats{
@@ -421,18 +426,18 @@ func toContainerStats3(libcontainerStats *libcontainer.Stats, ret *info.Containe
 	}
 }
 
-func toContainerStats(libcontainerStats *libcontainer.Stats) *info.ContainerStats {
-	s := libcontainerStats.CgroupStats
-	ret := new(info.ContainerStats)
-	ret.Timestamp = time.Now()
+func newContainerStats(libcontainerStats *libcontainer.Stats) *info.ContainerStats {
+	ret := &info.ContainerStats{
+		Timestamp: time.Now(),
+	}
 
-	if s != nil {
-		toContainerStats0(s, ret)
-		toContainerStats1(s, ret)
-		toContainerStats2(s, ret)
+	if s := libcontainerStats.CgroupStats; s != nil {
+		setCpuStats(s, ret)
+		setDiskIoStats(s, ret)
+		setMemoryStats(s, ret)
 	}
 	if len(libcontainerStats.Interfaces) > 0 {
-		toContainerStats3(libcontainerStats, ret)
+		setNetworkStats(libcontainerStats, ret)
 	}
 	return ret
 }
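After updating, a quick way to see the new data end to end: the following is a minimal sketch, not part of this commit, that reads the new Cpu.CFS and Memory.Swap fields through the cAdvisor v1 client; the endpoint URL and container path are placeholder assumptions.

package main

import (
	"fmt"
	"log"

	client "github.com/google/cadvisor/client"
	info "github.com/google/cadvisor/info/v1"
)

func main() {
	// Assumed: a cAdvisor instance listening locally on port 8080.
	c, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		log.Fatal(err)
	}
	// Fetch the most recent stats sample for an example container path.
	cinfo, err := c.ContainerInfo("/docker/<container-id>", &info.ContainerInfoRequest{NumStats: 1})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range cinfo.Stats {
		// New in this update: CFS throttling counters and swap usage.
		fmt.Printf("cfs_periods=%d throttled_periods=%d throttled_time=%dns swap=%d bytes\n",
			s.Cpu.CFS.Periods, s.Cpu.CFS.ThrottledPeriods, s.Cpu.CFS.ThrottledTime, s.Memory.Swap)
	}
}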