vendor: cadvisor v0.38.4

vendor/github.com/google/cadvisor/metrics/prometheus.go (generated, vendored): 181 changed lines
@@ -422,7 +422,8 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				getValues: func(s *info.ContainerStats) metricValues {
 					return metricValues{{value: float64(s.Memory.WorkingSet), timestamp: s.Timestamp}}
 				},
-			}, {
+			},
+			{
 				name:      "container_memory_failures_total",
 				help:      "Cumulative count of memory allocation failures.",
 				valueType: prometheus.CounterValue,
@@ -454,6 +455,33 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			},
 		}...)
 	}
+	if includedMetrics.Has(container.MemoryNumaMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
+				name:        "container_memory_numa_pages",
+				help:        "Number of used pages per NUMA node",
+				valueType:   prometheus.GaugeValue,
+				extraLabels: []string{"type", "scope", "node"},
+				getValues: func(s *info.ContainerStats) metricValues {
+					values := make(metricValues, 0)
+					values = append(values, getNumaStatsPerNode(s.Memory.ContainerData.NumaStats.File,
+						[]string{"file", "container"}, s.Timestamp)...)
+					values = append(values, getNumaStatsPerNode(s.Memory.ContainerData.NumaStats.Anon,
+						[]string{"anon", "container"}, s.Timestamp)...)
+					values = append(values, getNumaStatsPerNode(s.Memory.ContainerData.NumaStats.Unevictable,
+						[]string{"unevictable", "container"}, s.Timestamp)...)
+
+					values = append(values, getNumaStatsPerNode(s.Memory.HierarchicalData.NumaStats.File,
+						[]string{"file", "hierarchy"}, s.Timestamp)...)
+					values = append(values, getNumaStatsPerNode(s.Memory.HierarchicalData.NumaStats.Anon,
+						[]string{"anon", "hierarchy"}, s.Timestamp)...)
+					values = append(values, getNumaStatsPerNode(s.Memory.HierarchicalData.NumaStats.Unevictable,
+						[]string{"unevictable", "hierarchy"}, s.Timestamp)...)
+					return values
+				},
+			},
+		}...)
+	}
 	if includedMetrics.Has(container.AcceleratorUsageMetrics) {
 		c.containerMetrics = append(c.containerMetrics, []containerMetric{
 			{
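The container_memory_numa_pages gauge added above emits one sample per NUMA node for each memory type ("file", "anon", "unevictable") in both the "container" and "hierarchy" scopes. The standalone Go sketch below is not part of the vendored code: sample and numaSamples are simplified stand-ins that mirror how getNumaStatsPerNode fans a per-node page-count map out into labelled values, fed with the File counts from the fake provider updated later in this commit.

// Standalone illustration only; sample and numaSamples are simplified
// stand-ins for cadvisor's metricValue and getNumaStatsPerNode.
package main

import (
	"fmt"
	"strconv"
)

type sample struct {
	value  float64
	labels []string // {type, scope, node}
}

// numaSamples turns a per-NUMA-node page count map into one labelled
// sample per node, the same fan-out getNumaStatsPerNode performs.
func numaSamples(perNode map[uint8]uint64, memType, scope string) []sample {
	out := make([]sample, 0, len(perNode))
	for node, pages := range perNode {
		out = append(out, sample{
			value:  float64(pages),
			labels: []string{memType, scope, strconv.FormatUint(uint64(node), 10)},
		})
	}
	return out
}

func main() {
	// File page counts taken from the fake provider updated in this commit.
	file := map[uint8]uint64{0: 16649, 1: 10000}
	for _, s := range numaSamples(file, "file", "container") {
		fmt.Printf("container_memory_numa_pages{type=%q,scope=%q,node=%q} %v\n",
			s.labels[0], s.labels[1], s.labels[2], s.value)
	}
}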
@@ -1549,41 +1577,48 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 		}...)
 	}
 	if includedMetrics.Has(container.PerfMetrics) {
+		if includedMetrics.Has(container.PerCpuUsageMetrics) {
+			c.containerMetrics = append(c.containerMetrics, []containerMetric{
+				{
+					name:        "container_perf_events_total",
+					help:        "Perf event metric.",
+					valueType:   prometheus.CounterValue,
+					extraLabels: []string{"cpu", "event"},
+					getValues: func(s *info.ContainerStats) metricValues {
+						return getPerCPUCorePerfEvents(s)
+					},
+				},
+				{
+					name:        "container_perf_events_scaling_ratio",
+					help:        "Perf event metric scaling ratio.",
+					valueType:   prometheus.GaugeValue,
+					extraLabels: []string{"cpu", "event"},
+					getValues: func(s *info.ContainerStats) metricValues {
+						return getPerCPUCoreScalingRatio(s)
+					},
+				}}...)
+		} else {
+			c.containerMetrics = append(c.containerMetrics, []containerMetric{
+				{
+					name:        "container_perf_events_total",
+					help:        "Perf event metric.",
+					valueType:   prometheus.CounterValue,
+					extraLabels: []string{"cpu", "event"},
+					getValues: func(s *info.ContainerStats) metricValues {
+						return getAggregatedCorePerfEvents(s)
+					},
+				},
+				{
+					name:        "container_perf_events_scaling_ratio",
+					help:        "Perf event metric scaling ratio.",
+					valueType:   prometheus.GaugeValue,
+					extraLabels: []string{"cpu", "event"},
+					getValues: func(s *info.ContainerStats) metricValues {
+						return getMinCoreScalingRatio(s)
+					},
+				}}...)
+		}
 		c.containerMetrics = append(c.containerMetrics, []containerMetric{
-			{
-				name:        "container_perf_events_total",
-				help:        "Perf event metric.",
-				valueType:   prometheus.CounterValue,
-				extraLabels: []string{"cpu", "event"},
-				getValues: func(s *info.ContainerStats) metricValues {
-					values := make(metricValues, 0, len(s.PerfStats))
-					for _, metric := range s.PerfStats {
-						values = append(values, metricValue{
-							value:     float64(metric.Value),
-							labels:    []string{strconv.Itoa(metric.Cpu), metric.Name},
-							timestamp: s.Timestamp,
-						})
-					}
-					return values
-				},
-			},
-			{
-				name:        "container_perf_events_scaling_ratio",
-				help:        "Perf event metric scaling ratio.",
-				valueType:   prometheus.GaugeValue,
-				extraLabels: []string{"cpu", "event"},
-				getValues: func(s *info.ContainerStats) metricValues {
-					values := make(metricValues, 0, len(s.PerfStats))
-					for _, metric := range s.PerfStats {
-						values = append(values, metricValue{
-							value:     metric.ScalingRatio,
-							labels:    []string{strconv.Itoa(metric.Cpu), metric.Name},
-							timestamp: s.Timestamp,
-						})
-					}
-					return values
-				},
-			},
 			{
 				name:      "container_perf_uncore_events_total",
 				help:      "Perf uncore event metric.",
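With the hunk above, per-core perf series are only exposed when PerCpuUsageMetrics is enabled alongside PerfMetrics; otherwise container_perf_events_total is aggregated across cores into one series per event with an empty cpu label. The standalone sketch below is not part of the vendored code: perfStat and aggregateByEvent are simplified stand-ins for info.PerfStat and getAggregatedCorePerfEvents, fed with the instruction counts from the fake provider in this commit (123 on CPU 0, 456 on CPU 1).

// Standalone illustration only; perfStat and aggregateByEvent are
// simplified stand-ins for info.PerfStat and getAggregatedCorePerfEvents.
package main

import "fmt"

type perfStat struct {
	name  string
	cpu   int
	value uint64
}

// aggregateByEvent sums event counts across cores, dropping the per-core
// breakdown, which is what the aggregated (non per-CPU) mode reports.
func aggregateByEvent(stats []perfStat) map[string]uint64 {
	agg := make(map[string]uint64)
	for _, s := range stats {
		agg[s.name] += s.value
	}
	return agg
}

func main() {
	// Instruction counts taken from the fake provider in this commit.
	stats := []perfStat{
		{name: "instructions", cpu: 0, value: 123},
		{name: "instructions", cpu: 1, value: 456},
	}

	// Per-CPU mode (PerCpuUsageMetrics enabled): one series per core.
	for _, s := range stats {
		fmt.Printf("container_perf_events_total{cpu=\"%d\",event=%q} %d\n", s.cpu, s.name, s.value)
	}

	// Aggregated mode (PerCpuUsageMetrics disabled): one series per event,
	// cpu label left empty; here 123 + 456 = 579.
	for event, total := range aggregateByEvent(stats) {
		fmt.Printf("container_perf_events_total{cpu=\"\",event=%q} %d\n", event, total)
	}
}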
@@ -1903,3 +1938,79 @@ var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
 func sanitizeLabelName(name string) string {
 	return invalidNameCharRE.ReplaceAllString(name, "_")
 }
+
+func getNumaStatsPerNode(nodeStats map[uint8]uint64, labels []string, timestamp time.Time) metricValues {
+	mValues := make(metricValues, 0, len(nodeStats))
+	for node, stat := range nodeStats {
+		nodeLabels := append(labels, strconv.FormatUint(uint64(node), 10))
+		mValues = append(mValues, metricValue{value: float64(stat), labels: nodeLabels, timestamp: timestamp})
+	}
+	return mValues
+}
+
+func getPerCPUCorePerfEvents(s *info.ContainerStats) metricValues {
+	values := make(metricValues, 0, len(s.PerfStats))
+	for _, metric := range s.PerfStats {
+		values = append(values, metricValue{
+			value:     float64(metric.Value),
+			labels:    []string{strconv.Itoa(metric.Cpu), metric.Name},
+			timestamp: s.Timestamp,
+		})
+	}
+	return values
+}
+
+func getPerCPUCoreScalingRatio(s *info.ContainerStats) metricValues {
+	values := make(metricValues, 0, len(s.PerfStats))
+	for _, metric := range s.PerfStats {
+		values = append(values, metricValue{
+			value:     metric.ScalingRatio,
+			labels:    []string{strconv.Itoa(metric.Cpu), metric.Name},
+			timestamp: s.Timestamp,
+		})
+	}
+	return values
+}
+
+func getAggregatedCorePerfEvents(s *info.ContainerStats) metricValues {
+	values := make(metricValues, 0)
+
+	perfEventStatAgg := make(map[string]uint64)
+	// aggregate by event
+	for _, perfStat := range s.PerfStats {
+		perfEventStatAgg[perfStat.Name] += perfStat.Value
+	}
+	// create aggregated metrics
+	for perfEvent, perfValue := range perfEventStatAgg {
+		values = append(values, metricValue{
+			value:     float64(perfValue),
+			labels:    []string{"", perfEvent},
+			timestamp: s.Timestamp,
+		})
+	}
+	return values
+}
+
+func getMinCoreScalingRatio(s *info.ContainerStats) metricValues {
+	values := make(metricValues, 0)
+	perfEventStatMin := make(map[string]float64)
+	// search for the minimal value of scaling ratio for a specific event
+	for _, perfStat := range s.PerfStats {
+		if _, ok := perfEventStatMin[perfStat.Name]; !ok {
+			// found a new event
+			perfEventStatMin[perfStat.Name] = perfStat.ScalingRatio
+		} else if perfStat.ScalingRatio < perfEventStatMin[perfStat.Name] {
+			// found a lower value of scaling ratio so replace the minimal value
+			perfEventStatMin[perfStat.Name] = perfStat.ScalingRatio
+		}
+	}
+
+	for perfEvent, perfScalingRatio := range perfEventStatMin {
+		values = append(values, metricValue{
+			value:     perfScalingRatio,
+			labels:    []string{"", perfEvent},
+			timestamp: s.Timestamp,
+		})
+	}
+	return values
+}
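getMinCoreScalingRatio, added above, reports for each event the lowest scaling ratio observed on any core, i.e. the most pessimistic multiplexing accuracy, again with an empty cpu label. The standalone sketch below is not part of the vendored code; perfStat and minScalingRatio are simplified stand-ins, fed with the ratios from the fake provider updated in this commit.

// Standalone illustration only; perfStat and minScalingRatio are
// simplified stand-ins for info.PerfStat and getMinCoreScalingRatio.
package main

import "fmt"

type perfStat struct {
	name         string
	cpu          int
	scalingRatio float64
}

// minScalingRatio keeps, per event name, the smallest scaling ratio seen
// on any core.
func minScalingRatio(stats []perfStat) map[string]float64 {
	lowest := make(map[string]float64)
	for _, s := range stats {
		if cur, ok := lowest[s.name]; !ok || s.scalingRatio < cur {
			lowest[s.name] = s.scalingRatio
		}
	}
	return lowest
}

func main() {
	// Scaling ratios taken from the fake provider in this commit.
	stats := []perfStat{
		{name: "instructions", cpu: 0, scalingRatio: 1.0},
		{name: "instructions", cpu: 1, scalingRatio: 0.5},
		{name: "instructions_retired", cpu: 0, scalingRatio: 0.66666666666},
		{name: "instructions_retired", cpu: 1, scalingRatio: 0.33333333333},
	}
	// Expected: instructions -> 0.5, instructions_retired -> 0.33333333333.
	for event, ratio := range minScalingRatio(stats) {
		fmt.Printf("container_perf_events_scaling_ratio{cpu=\"\",event=%q} %g\n", event, ratio)
	}
}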
vendor/github.com/google/cadvisor/metrics/prometheus_fake.go (generated, vendored): 74 changed lines
@@ -327,10 +327,20 @@ func (p testSubcontainersInfoProvider) GetRequestedContainersInfo(string, v2.Req
 			ContainerData: info.MemoryStatsMemoryData{
 				Pgfault:    10,
 				Pgmajfault: 11,
+				NumaStats: info.MemoryNumaStats{
+					File:        map[uint8]uint64{0: 16649, 1: 10000},
+					Anon:        map[uint8]uint64{0: 10000, 1: 7109},
+					Unevictable: map[uint8]uint64{0: 8900, 1: 10000},
+				},
 			},
 			HierarchicalData: info.MemoryStatsMemoryData{
 				Pgfault:    12,
 				Pgmajfault: 13,
+				NumaStats: info.MemoryNumaStats{
+					File:        map[uint8]uint64{0: 36649, 1: 10000},
+					Anon:        map[uint8]uint64{0: 20000, 1: 7109},
+					Unevictable: map[uint8]uint64{0: 8900, 1: 20000},
+				},
 			},
 			Cache: 14,
 			RSS:   15,
@@ -625,44 +635,56 @@ func (p testSubcontainersInfoProvider) GetRequestedContainersInfo(string, v2.Req
 			},
 			PerfStats: []info.PerfStat{
 				{
-					ScalingRatio: 1.0,
-					Value:        123,
-					Name:         "instructions",
-					Cpu:          0,
+					PerfValue: info.PerfValue{
+						ScalingRatio: 1.0,
+						Value:        123,
+						Name:         "instructions",
+					},
+					Cpu: 0,
 				},
 				{
-					ScalingRatio: 0.5,
-					Value:        456,
-					Name:         "instructions",
-					Cpu:          1,
+					PerfValue: info.PerfValue{
+						ScalingRatio: 0.5,
+						Value:        456,
+						Name:         "instructions",
+					},
+					Cpu: 1,
 				},
 				{
-					ScalingRatio: 0.66666666666,
-					Value:        321,
-					Name:         "instructions_retired",
-					Cpu:          0,
+					PerfValue: info.PerfValue{
+						ScalingRatio: 0.66666666666,
+						Value:        321,
+						Name:         "instructions_retired",
+					},
+					Cpu: 0,
 				},
 				{
-					ScalingRatio: 0.33333333333,
-					Value:        789,
-					Name:         "instructions_retired",
-					Cpu:          1,
+					PerfValue: info.PerfValue{
+						ScalingRatio: 0.33333333333,
+						Value:        789,
+						Name:         "instructions_retired",
+					},
+					Cpu: 1,
 				},
 			},
 			PerfUncoreStats: []info.PerfUncoreStat{
 				{
-					ScalingRatio: 1.0,
-					Value:        1231231512.0,
-					Name:         "cas_count_read",
-					Socket:       0,
-					PMU:          "uncore_imc_0",
+					PerfValue: info.PerfValue{
+						ScalingRatio: 1.0,
+						Value:        1231231512.0,
+						Name:         "cas_count_read",
+					},
+					Socket: 0,
+					PMU:    "uncore_imc_0",
 				},
 				{
-					ScalingRatio: 1.0,
-					Value:        1111231331.0,
-					Name:         "cas_count_read",
-					Socket:       1,
-					PMU:          "uncore_imc_0",
+					PerfValue: info.PerfValue{
+						ScalingRatio: 1.0,
+						Value:        1111231331.0,
+						Name:         "cas_count_read",
+					},
+					Socket: 1,
+					PMU:    "uncore_imc_0",
 				},
 			},
 			ReferencedMemory: 1234,