Collect cgroup stats one last time before exit

This commit adds a collection step to the Stop() task handler, which
retrieves whatever metrics are still available for the container at that
point and stores them until the next prometheus Collect() cycle.

This makes short-lived containers visible in prometheus, where they
would otherwise be missed entirely (for example, a containerd-stress run
used to show only 2 or 3 containers at the end, whereas now all of them
appear). It also gives more accurate final values when long-running
containers exit (for example, CPU usage could spike in the last few
seconds before the container stops).

A simple case illustrating this with CPU usage would be:

  ctr run -t --rm docker.io/library/alpine:latest mycontainer sh -c 'yes > /dev/null & sleep 3 && pkill yes'

Signed-off-by: Mathieu Pasquet <mathieu.pasquet@alterway.fr>
Author: Mathieu Pasquet
Date:   2017-10-03 11:44:27 +02:00
Commit: ed519bb5ce (parent 8ded4fe3a7)

3 changed files with 24 additions and 5 deletions
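
Taken together, the three changed files implement a small store-and-drain
scheme: when a task stops, its cgroup is sampled one last time and the
resulting metrics are pushed into a buffered channel owned by the
collector; the next prometheus scrape then drains that channel in
addition to walking the live cgroups. Below is a rough, self-contained
sketch of that flow; the sample, store, stop and scrape names are made up
for illustration and are not the containerd or prometheus types.

package main

import "fmt"

// sample stands in for a prometheus.Metric in this sketch.
type sample struct {
	id  string
	cpu float64
}

// store stands in for the collector: it keeps the final samples of
// already-exited tasks in a buffered channel until the next scrape.
type store struct {
	stored chan sample
}

// stop mirrors the monitor's Stop() hook: take one last sample and buffer it.
// The non-blocking send drops the sample if the buffer is full, so task
// teardown never blocks on metrics.
func (s *store) stop(id string, cpu float64) {
	select {
	case s.stored <- sample{id: id, cpu: cpu}:
	default:
	}
}

// scrape mirrors Collect(): report live tasks, then everything stored
// since the previous scrape.
func (s *store) scrape(live []sample) []sample {
	out := append([]sample(nil), live...)
drain:
	for {
		select {
		case v := <-s.stored:
			out = append(out, v)
		default:
			break drain
		}
	}
	return out
}

func main() {
	s := &store{stored: make(chan sample, 8)}
	s.stop("short-lived", 2.5) // this container is gone before the scrape happens

	for _, v := range s.scrape([]sample{{id: "long-running", cpu: 0.1}}) {
		fmt.Printf("%s cpu=%.1f\n", v.id, v.cpu)
	}
}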

@@ -70,6 +70,8 @@ func (m *cgroupsMonitor) Monitor(c runtime.Task) error {
 func (m *cgroupsMonitor) Stop(c runtime.Task) error {
 	info := c.Info()
+	t := c.(*linux.Task)
+	m.collector.collect(info.ID, info.Namespace, t.Cgroup(), m.collector.storedMetrics, nil)
 	m.collector.Remove(info.ID, info.Namespace)
 	return nil
 }
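
In the Stop() hook above, `t := c.(*linux.Task)` is a plain Go type
assertion: the generic runtime.Task is narrowed to the Linux task type so
its cgroup can be reached for the final collection, and nil is passed for
the WaitGroup because the call is synchronous (the guard for that appears
in the last hunk). The single-value assertion form panics if the concrete
type ever differs; a comma-ok variant would skip the collection instead.
The task, linuxTask and stop names in this sketch are toy stand-ins, not
the real containerd types.

package main

import "fmt"

// Toy stand-ins for runtime.Task and *linux.Task.
type task interface{ ID() string }

type linuxTask struct{ id string }

func (t *linuxTask) ID() string     { return t.id }
func (t *linuxTask) Cgroup() string { return "/sys/fs/cgroup/containers/" + t.id }

// stop sketches a defensive version of the Stop() hook: the comma-ok
// assertion avoids a panic if the task is not the expected concrete type.
func stop(c task) {
	t, ok := c.(*linuxTask)
	if !ok {
		return // nothing to collect for other task types
	}
	fmt.Println("final collection from", t.Cgroup())
}

func main() {
	stop(&linuxTask{id: "mycontainer"})
}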

@@ -31,6 +31,10 @@ func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc {
 func (m *metric) collect(id, namespace string, stats *cgroups.Metrics, ns *metrics.Namespace, ch chan<- prometheus.Metric) {
 	values := m.getValues(stats)
 	for _, v := range values {
-		ch <- prometheus.MustNewConstMetric(m.desc(ns), m.vt, v.v, append([]string{id, namespace}, v.l...)...)
+		select {
+		case ch <- prometheus.MustNewConstMetric(m.desc(ns), m.vt, v.v, append([]string{id, namespace}, v.l...)...):
+		default:
+			break
+		}
 	}
 }
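
metric.collect() is now called on two paths with two different channels:
during a scrape, ch is the channel prometheus hands to Collect(), which is
being actively drained, so the send normally succeeds; from Stop(), ch is
the collector's buffered storedMetrics channel, and the select with a
default case makes the send non-blocking, so a full buffer drops the
sample rather than stalling task teardown. Note that the `break` in the
default case only exits the select, not the surrounding for loop, so the
remaining values are still attempted. A small stand-alone illustration of
the non-blocking send (the stored channel here is just a stand-in for
storedMetrics):

package main

import "fmt"

func main() {
	// A tiny buffer standing in for storedMetrics.
	stored := make(chan int, 2)

	for v := 1; v <= 4; v++ {
		select {
		case stored <- v: // succeeds while the buffer has room
		default:
			fmt.Println("dropped", v) // buffer full: drop instead of blocking
		}
	}
	fmt.Println("buffered:", len(stored)) // 2
}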

@@ -40,6 +40,7 @@ func newCollector(ns *metrics.Namespace) *collector {
 	c.metrics = append(c.metrics, memoryMetrics...)
 	c.metrics = append(c.metrics, hugetlbMetrics...)
 	c.metrics = append(c.metrics, blkioMetrics...)
+	c.storedMetrics = make(chan prometheus.Metric, 100*len(c.metrics))
 	ns.Add(c)
 	return c
 }
@@ -59,9 +60,10 @@ func taskID(id, namespace string) string {
 type collector struct {
 	mu sync.RWMutex
 
-	cgroups map[string]*task
-	ns      *metrics.Namespace
-	metrics []*metric
+	cgroups       map[string]*task
+	ns            *metrics.Namespace
+	metrics       []*metric
+	storedMetrics chan prometheus.Metric
 }
 
 func (c *collector) Describe(ch chan<- *prometheus.Desc) {
@@ -77,12 +79,23 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 		wg.Add(1)
 		go c.collect(t.id, t.namespace, t.cgroup, ch, wg)
 	}
+SelectLoop:
+	for {
+		select {
+		case value := <-c.storedMetrics:
+			ch <- value
+		default:
+			break SelectLoop
+		}
+	}
 	c.mu.RUnlock()
 	wg.Wait()
 }
 
 func (c *collector) collect(id, namespace string, cg cgroups.Cgroup, ch chan<- prometheus.Metric, wg *sync.WaitGroup) {
-	defer wg.Done()
+	if wg != nil {
+		defer wg.Done()
+	}
 	stats, err := cg.Stat(cgroups.IgnoreNotExist)
 	if err != nil {
 		logrus.WithError(err).Errorf("stat cgroup %s", id)
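
Two details of this last hunk are worth calling out. First, the drain loop
needs the SelectLoop label because an unlabeled break in the default case
would only leave the select, and the for would spin forever once the
buffer is empty. Second, collect() now tolerates a nil WaitGroup: the
scrape path still runs it in one goroutine per live task, while the Stop()
path calls it synchronously with nil. A sketch of that optional-WaitGroup
pattern; collectOne and the task IDs are illustrative only, not the real
containerd function.

package main

import (
	"fmt"
	"sync"
)

// collectOne simulates the per-task collection; the WaitGroup is optional.
func collectOne(id string, wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}
	fmt.Println("collected", id)
}

func main() {
	var wg sync.WaitGroup

	// Scrape path: one goroutine per live task, tracked by the WaitGroup.
	for _, id := range []string{"task-1", "task-2"} {
		wg.Add(1)
		go collectOne(id, &wg)
	}
	wg.Wait()

	// Stop() path: a single synchronous call, no WaitGroup.
	collectOne("task-3", nil)
}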