add metrics to gc
@@ -57,6 +57,10 @@ func (m *MetricsForE2E) filterMetrics() {
 	for _, metric := range InterestingApiServerMetrics {
 		interestingApiServerMetrics[metric] = (*m).ApiServerMetrics[metric]
 	}
+	interestingControllerManagerMetrics := make(metrics.ControllerManagerMetrics)
+	for _, metric := range InterestingControllerManagerMetrics {
+		interestingControllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
+	}
 	interestingKubeletMetrics := make(map[string]metrics.KubeletMetrics)
 	for kubelet, grabbed := range (*m).KubeletMetrics {
 		interestingKubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
@@ -65,6 +69,7 @@ func (m *MetricsForE2E) filterMetrics() {
 		}
 	}
 	(*m).ApiServerMetrics = interestingApiServerMetrics
+	(*m).ControllerManagerMetrics = interestingControllerManagerMetrics
 	(*m).KubeletMetrics = interestingKubeletMetrics
 }

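The filterMetrics change above whitelists controller-manager metric families the same way the apiserver and kubelet families are already filtered. The pkg/metrics types are not shown in this diff; the sketch below assumes ControllerManagerMetrics behaves like a map from metric family name to scraped samples, which is what the indexing and assignment above imply. All names here are illustrative, not the framework's.

// Minimal, self-contained sketch of the whitelist filter, assuming the grabbed
// metrics behave like a map[string]model.Samples keyed by metric family name.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

type controllerManagerMetrics map[string]model.Samples

func filterFamilies(all controllerManagerMetrics, interesting []string) controllerManagerMetrics {
	kept := make(controllerManagerMetrics)
	for _, name := range interesting {
		kept[name] = all[name] // families that were never scraped simply stay empty
	}
	return kept
}

func main() {
	all := controllerManagerMetrics{
		"garbage_collector_event_processing_latency_microseconds": model.Samples{},
		"unrelated_metric": model.Samples{},
	}
	kept := filterFamilies(all, []string{"garbage_collector_event_processing_latency_microseconds"})
	fmt.Println(len(kept)) // 1
}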
@@ -76,6 +81,12 @@ func (m *MetricsForE2E) PrintHumanReadable() string {
 			buf.WriteString(fmt.Sprintf("\t%v\n", metrics.PrintSample(sample)))
 		}
 	}
+	for _, interestingMetric := range InterestingControllerManagerMetrics {
+		buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
+		for _, sample := range (*m).ControllerManagerMetrics[interestingMetric] {
+			buf.WriteString(fmt.Sprintf("\t%v\n", metrics.PrintSample(sample)))
+		}
+	}
 	for kubelet, grabbed := range (*m).KubeletMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", kubelet))
 		for _, interestingMetric := range InterestingKubeletMetrics {
@@ -104,6 +115,12 @@ var InterestingApiServerMetrics = []string{
 	"etcd_request_latencies_summary",
 }

+var InterestingControllerManagerMetrics = []string{
+	"garbage_collector_event_processing_latency_microseconds",
+	"garbage_collector_dirty_processing_latency_microseconds",
+	"garbage_collector_orphan_processing_latency_microseconds",
+}
+
 var InterestingKubeletMetrics = []string{
 	"kubelet_container_manager_latency_microseconds",
 	"kubelet_docker_errors",
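These three family names correspond to the EventProcessingLatency, DirtyProcessingLatency and OrphanProcessingLatency summaries that the integration test below reads. Their definitions in the garbagecollector package are not part of this diff; as a rough sketch only (the Subsystem/Name split, the Help strings and the init-time registration are assumptions), such summaries are typically declared with client_golang like this:

package garbagecollector

import "github.com/prometheus/client_golang/prometheus"

const gcSubsystem = "garbage_collector" // assumed prefix, matching the family names above

var (
	// EventProcessingLatency would track how long an item waits in the eventQueue.
	EventProcessingLatency = prometheus.NewSummary(prometheus.SummaryOpts{
		Subsystem: gcSubsystem,
		Name:      "event_processing_latency_microseconds",
		Help:      "Time in microseconds an item spends in the eventQueue.",
	})
	DirtyProcessingLatency = prometheus.NewSummary(prometheus.SummaryOpts{
		Subsystem: gcSubsystem,
		Name:      "dirty_processing_latency_microseconds",
		Help:      "Time in microseconds an item spends in the dirtyQueue.",
	})
	OrphanProcessingLatency = prometheus.NewSummary(prometheus.SummaryOpts{
		Subsystem: gcSubsystem,
		Name:      "orphan_processing_latency_microseconds",
		Help:      "Time in microseconds an item spends in the orphanQueue.",
	})
)

func init() {
	prometheus.MustRegister(EventProcessingLatency)
	prometheus.MustRegister(DirtyProcessingLatency)
	prometheus.MustRegister(OrphanProcessingLatency)
}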
@@ -24,6 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
+	"k8s.io/kubernetes/pkg/metrics"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"

@@ -97,6 +98,23 @@ func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interfac
 	return ret, nil
 }

+func gatherMetrics(f *framework.Framework) {
+	By("Gathering metrics")
+	var summary framework.TestDataSummary
+	grabber, err := metrics.NewMetricsGrabber(f.Client, false, false, true, false)
+	if err != nil {
+		framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
+	} else {
+		received, err := grabber.Grab()
+		if err != nil {
+			framework.Logf("MetricsGrabber failed to grab metrics. Skipping metrics gathering.")
+		} else {
+			summary = (*framework.MetricsForE2E)(&received)
+			framework.Logf(summary.PrintHumanReadable())
+		}
+	}
+}
+
 var _ = framework.KubeDescribe("Garbage collector", func() {
 	f := framework.NewDefaultFramework("gc")
 	It("[Feature:GarbageCollector] should delete pods created by rc when not orphaning", func() {
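gatherMetrics only logs and never fails the test when metrics cannot be grabbed. The `(*framework.MetricsForE2E)(&received)` conversion works because MetricsForE2E is (presumably) a named type whose underlying struct matches what the grabber returns, so the grabbed data picks up the summary's reporting methods. A stripped-down sketch of that Go pattern, using illustrative names rather than the framework's real types:

package main

import "fmt"

// collection stands in for the struct the metrics grabber returns.
type collection struct {
	ControllerManagerMetrics map[string][]string
}

// metricsForE2E is a named type over the same underlying struct, so a grabbed
// collection can be converted to it and thereby gain the reporting method.
type metricsForE2E collection

func (m *metricsForE2E) PrintHumanReadable() string {
	return fmt.Sprintf("%d controller-manager metric families", len(m.ControllerManagerMetrics))
}

// testDataSummary mimics (part of) the reporting interface the e2e framework consumes.
type testDataSummary interface {
	PrintHumanReadable() string
}

func main() {
	received := collection{ControllerManagerMetrics: map[string][]string{
		"garbage_collector_event_processing_latency_microseconds": nil,
	}}
	var summary testDataSummary = (*metricsForE2E)(&received) // same shape of conversion as in gatherMetrics
	fmt.Println(summary.PrintHumanReadable())
}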
@@ -147,6 +165,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 				framework.Failf("remaining pods are: %#v", remainingPods)
 			}
 		}
+		gatherMetrics(f)
 	})

 	It("[Feature:GarbageCollector] should orphan pods created by rc", func() {
@@ -193,5 +212,6 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 		}); err != nil && err != wait.ErrWaitTimeout {
 			framework.Failf("%v", err)
 		}
+		gatherMetrics(f)
 	})
 })

@@ -27,6 +27,7 @@ import (
 	"testing"
 	"time"

+	dto "github.com/prometheus/client_model/go"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
@@ -424,6 +425,19 @@ func TestStressingCascadingDeletion(t *testing.T) {
 	if gc.GraphHasUID(uids) {
 		t.Errorf("Expect all nodes representing replication controllers are removed from the Propagator's graph")
 	}
+	metric := &dto.Metric{}
+	garbagecollector.EventProcessingLatency.Write(metric)
+	count := float64(metric.Summary.GetSampleCount())
+	sum := metric.Summary.GetSampleSum()
+	t.Logf("Average time spent in GC's eventQueue is %.1f microseconds", sum/count)
+	garbagecollector.DirtyProcessingLatency.Write(metric)
+	count = float64(metric.Summary.GetSampleCount())
+	sum = metric.Summary.GetSampleSum()
+	t.Logf("Average time spent in GC's dirtyQueue is %.1f microseconds", sum/count)
+	garbagecollector.OrphanProcessingLatency.Write(metric)
+	count = float64(metric.Summary.GetSampleCount())
+	sum = metric.Summary.GetSampleSum()
+	t.Logf("Average time spent in GC's orphanQueue is %.1f microseconds", sum/count)
 }

 func TestOrphaning(t *testing.T) {
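The test additions above read each garbage collector latency summary directly: the collector is serialized into a dto.Metric, and the running sample sum divided by the sample count gives the average latency. A minimal standalone illustration of that pattern with client_golang (the metric name here is made up for the example):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// An illustrative summary, standing in for something like EventProcessingLatency.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "example_processing_latency_microseconds",
		Help: "Illustrative latency summary.",
	})
	for _, v := range []float64{100, 200, 600} {
		latency.Observe(v)
	}

	// Write snapshots the summary into a dto.Metric, which carries the running
	// count and sum of all observations; their ratio is the average latency.
	m := &dto.Metric{}
	if err := latency.Write(m); err != nil {
		log.Fatal(err)
	}
	count := float64(m.Summary.GetSampleCount())
	sum := m.Summary.GetSampleSum()
	fmt.Printf("average latency: %.1f microseconds\n", sum/count) // prints 300.0
}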