containerd/metrics/cgroups/cgroups.go
Mathieu Pasquet ed519bb5ce Collect cgroup stats one last time before exit
This commit adds a collection step to the Stop() task handler that
retrieves the metrics available for the container at that moment and
stores them until the next Prometheus Collect() cycle.

This makes short-lived containers visible in Prometheus; previously
they were simply missed (running containerd-stress, for example, would
leave only two or three containers visible at the end, whereas now all
of them appear). It also gives more accurate final samples for
long-running containers (for example, a CPU usage spike in the last few
seconds before exit is no longer lost).

A simple case illustrating this with CPU usage:

  ctr run -t --rm docker.io/library/alpine:latest mycontainer sh -c 'yes > /dev/null & sleep 3 && pkill yes'

Signed-off-by: Mathieu Pasquet <mathieu.pasquet@alterway.fr>
2017-10-06 15:40:38 +02:00
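
For context, this is the buffering pattern the Stop() handler below relies
on: samples gathered at task exit are parked on a channel and drained on
the next Prometheus scrape. A minimal sketch, assuming storedMetrics is a
channel of prometheus.Metric (only the field name is visible in this file;
the rest is illustrative, not the actual collector implementation):

  package cgroups

  import "github.com/prometheus/client_golang/prometheus"

  // Sketch only: the real collector also tracks live cgroups.
  type collector struct {
  	storedMetrics chan prometheus.Metric // samples saved by Stop()
  }

  // Collect implements prometheus.Collector. After gathering from live
  // cgroups (elided here), it drains whatever Stop() stored so that
  // short-lived containers still show up in this scrape.
  func (c *collector) Collect(ch chan<- prometheus.Metric) {
  	for {
  		select {
  		case m := <-c.storedMetrics:
  			ch <- m
  		default:
  			return // buffer empty; scrape is complete
  		}
  	}
  }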


// +build linux

package cgroups

import (
	"github.com/containerd/cgroups"
	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/linux"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/plugin"
	"github.com/containerd/containerd/runtime"
	metrics "github.com/docker/go-metrics"
	"golang.org/x/net/context"
)

// Config for the cgroups monitor
type Config struct {
	NoPrometheus bool `toml:"no_prometheus"`
}

func init() {
	plugin.Register(&plugin.Registration{
		Type:   plugin.TaskMonitorPlugin,
		ID:     "cgroups",
		Init:   New,
		Config: &Config{},
	})
}

// New returns a new cgroups monitor
func New(ic *plugin.InitContext) (interface{}, error) {
	var ns *metrics.Namespace
	config := ic.Config.(*Config)
	// Prometheus export is optional: when disabled in config, no
	// namespace is created and nothing is registered with go-metrics.
	if !config.NoPrometheus {
		ns = metrics.NewNamespace("container", "", nil)
	}
	collector := newCollector(ns)
	oom, err := newOOMCollector(ns)
	if err != nil {
		return nil, err
	}
	if ns != nil {
		metrics.Register(ns)
	}
	return &cgroupsMonitor{
		collector: collector,
		oom:       oom,
		context:   ic.Context,
		publisher: ic.Events,
	}, nil
}

type cgroupsMonitor struct {
	collector *collector
	oom       *oomCollector
	context   context.Context
	publisher events.Publisher
}

// Monitor registers the task's cgroup with the stats and OOM collectors.
func (m *cgroupsMonitor) Monitor(c runtime.Task) error {
	info := c.Info()
	t := c.(*linux.Task)
	if err := m.collector.Add(info.ID, info.Namespace, t.Cgroup()); err != nil {
		return err
	}
	return m.oom.Add(info.ID, info.Namespace, t.Cgroup(), m.trigger)
}

// Stop collects the task's metrics one last time before removing it,
// so that short-lived containers and final pre-exit samples still make
// it into the next Prometheus Collect() cycle.
func (m *cgroupsMonitor) Stop(c runtime.Task) error {
	info := c.Info()
	t := c.(*linux.Task)
	m.collector.collect(info.ID, info.Namespace, t.Cgroup(), m.collector.storedMetrics, nil)
	m.collector.Remove(info.ID, info.Namespace)
	return nil
}

// trigger publishes a TaskOOM event when the OOM collector fires for a
// container's cgroup.
func (m *cgroupsMonitor) trigger(id, namespace string, cg cgroups.Cgroup) {
	ctx := namespaces.WithNamespace(m.context, namespace)
	if err := m.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &eventsapi.TaskOOM{
		ContainerID: id,
	}); err != nil {
		log.G(m.context).WithError(err).Error("post OOM event")
	}
}
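
The Config struct at the top of the file maps to containerd's TOML
configuration via the no_prometheus tag. A sketch of how disabling the
Prometheus namespace might look, assuming the plugin's section is keyed
by its registered ID, "cgroups" (verify the section name against your
containerd version's config documentation):

  # containerd config.toml (sketch; section name assumed)
  [plugins]
    [plugins.cgroups]
      no_prometheus = true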