update cadvisor to v0.31.0
vendor/github.com/google/cadvisor/container/libcontainer/handler.go (generated, vendored; 21 lines changed)
@@ -43,16 +43,16 @@ type Handler struct {
 	cgroupManager   cgroups.Manager
 	rootFs          string
 	pid             int
-	ignoreMetrics   container.MetricSet
+	includedMetrics container.MetricSet
 	pidMetricsCache map[int]*info.CpuSchedstat
 }
 
-func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) *Handler {
+func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, includedMetrics container.MetricSet) *Handler {
 	return &Handler{
 		cgroupManager:   cgroupManager,
 		rootFs:          rootFs,
 		pid:             pid,
-		ignoreMetrics:   ignoreMetrics,
+		includedMetrics: includedMetrics,
 		pidMetricsCache: make(map[int]*info.CpuSchedstat),
 	}
 }
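Note: the last parameter of NewHandler flips from an ignore-list to an include-list, so callers must now name the metrics they want collected instead of the ones they want skipped. A minimal caller sketch under the new contract; the chosen metrics, rootFs, pid, and the nil cgroup manager are illustrative placeholders, and it assumes container.MetricSet's Add helper:

package main

import (
	"os"

	"github.com/google/cadvisor/container"
	"github.com/google/cadvisor/container/libcontainer"
	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	// v0.31.0 contract: list the metrics you WANT collected.
	includedMetrics := container.MetricSet{}
	includedMetrics.Add(container.PerCpuUsageMetrics)
	includedMetrics.Add(container.NetworkUsageMetrics)

	// Placeholder only: real callers pass a configured runc cgroups manager.
	var cgroupManager cgroups.Manager

	h := libcontainer.NewHandler(cgroupManager, "/", os.Getpid(), includedMetrics)
	_ = h
}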
@@ -66,10 +66,10 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 	libcontainerStats := &libcontainer.Stats{
 		CgroupStats: cgroupStats,
 	}
-	withPerCPU := !h.ignoreMetrics.Has(container.PerCpuUsageMetrics)
+	withPerCPU := h.includedMetrics.Has(container.PerCpuUsageMetrics)
 	stats := newContainerStats(libcontainerStats, withPerCPU)
 
-	if !h.ignoreMetrics.Has(container.ProcessSchedulerMetrics) {
+	if h.includedMetrics.Has(container.ProcessSchedulerMetrics) {
 		pids, err := h.cgroupManager.GetAllPids()
 		if err != nil {
 			glog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
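Every guard in GetStats loses its negation in the same way: !h.ignoreMetrics.Has(x) becomes h.includedMetrics.Has(x). The set behind those checks behaves like the following self-contained sketch, written in the style of cadvisor's container.MetricSet (a map used as a set) rather than copied from it:

package main

import "fmt"

// MetricKind names one category of metrics; MetricSet is an include-list.
type MetricKind string

type MetricSet map[MetricKind]struct{}

// Add marks a metric kind as included.
func (ms MetricSet) Add(mk MetricKind) { ms[mk] = struct{}{} }

// Has reports whether a metric kind was explicitly included.
func (ms MetricSet) Has(mk MetricKind) bool {
	_, ok := ms[mk]
	return ok
}

func main() {
	included := MetricSet{}
	included.Add("network")

	// Include-list semantics: collect only what was explicitly added.
	fmt.Println(included.Has("network")) // true  -> collect network stats
	fmt.Println(included.Has("percpu"))  // false -> skip per-CPU stats
}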
@@ -85,7 +85,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 	if h.pid == 0 {
 		return stats, nil
 	}
-	if !h.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkUsageMetrics) {
 		netStats, err := networkStatsFromProc(h.rootFs, h.pid)
 		if err != nil {
 			glog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
@@ -93,7 +93,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 			stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
 		}
 	}
-	if !h.ignoreMetrics.Has(container.NetworkTcpUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkTcpUsageMetrics) {
 		t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
 		if err != nil {
 			glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
@@ -108,7 +108,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 			stats.Network.Tcp6 = t6
 		}
 	}
-	if !h.ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkUdpUsageMetrics) {
 		u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
 		if err != nil {
 			glog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
@@ -498,14 +498,17 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.Usage = s.MemoryStats.Usage.Usage
 	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
 	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
-	ret.Memory.Cache = s.MemoryStats.Stats["cache"]
-	ret.Memory.RSS = s.MemoryStats.Stats["rss"]
-	ret.Memory.Swap = s.MemoryStats.Stats["swap"]
-	ret.Memory.MappedFile = s.MemoryStats.Stats["mapped_file"]
+	if s.MemoryStats.UseHierarchy {
+		ret.Memory.Cache = s.MemoryStats.Stats["total_cache"]
+		ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
+		ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
+		ret.Memory.MappedFile = s.MemoryStats.Stats["total_mapped_file"]
+	} else {
+		ret.Memory.Cache = s.MemoryStats.Stats["cache"]
+		ret.Memory.RSS = s.MemoryStats.Stats["rss"]
+		ret.Memory.Swap = s.MemoryStats.Stats["swap"]
+		ret.Memory.MappedFile = s.MemoryStats.Stats["mapped_file"]
+	}
 	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
 		ret.Memory.ContainerData.Pgfault = v
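The setMemoryStats hunk keys off cgroup v1 hierarchical accounting: when memory.use_hierarchy is enabled, the kernel's memory.stat file exposes total_* counters that aggregate the cgroup and all of its descendants, while the bare keys cover the cgroup alone. A hypothetical helper condensing the selection the diff writes out inline:

package stats // illustrative package; not part of the diff

// memoryCounter picks between the aggregated and the local counter from a
// parsed memory.stat map. With memory.use_hierarchy=1 the "total_<key>"
// value includes every descendant cgroup; the bare "<key>" covers only
// this cgroup. (Hypothetical helper; the vendored code inlines this choice.)
func memoryCounter(stats map[string]uint64, key string, useHierarchy bool) uint64 {
	if useHierarchy {
		return stats["total_"+key]
	}
	return stats[key]
}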