update cadvisor to v0.31.0
51  vendor/github.com/google/cadvisor/accelerators/nvidia.go (generated, vendored)
@@ -31,7 +31,10 @@ import (
 )
 
 type NvidiaManager struct {
-	sync.RWMutex
+	sync.Mutex
+
+	// true if there are NVIDIA devices present on the node
+	devicesPresent bool
 
 	// true if the NVML library (libnvidia-ml.so.1) was loaded successfully
 	nvmlInitialized bool
@@ -51,20 +54,9 @@ func (nm *NvidiaManager) Setup() {
 		return
 	}
 
-	nm.initializeNVML()
-	if nm.nvmlInitialized {
-		return
-	}
-	go func() {
-		glog.V(2).Info("Starting goroutine to initialize NVML")
-		// TODO: use globalHousekeepingInterval
-		for range time.Tick(time.Minute) {
-			nm.initializeNVML()
-			if nm.nvmlInitialized {
-				return
-			}
-		}
-	}()
+	nm.devicesPresent = true
+
+	initializeNVML(nm)
 }
 
 // detectDevices returns true if a device with given pci id is present on the node.
@@ -91,20 +83,18 @@ func detectDevices(vendorId string) bool {
 }
 
 // initializeNVML initializes the NVML library and sets up the nvmlDevices map.
-func (nm *NvidiaManager) initializeNVML() {
+// This is defined as a variable to help in testing.
+var initializeNVML = func(nm *NvidiaManager) {
 	if err := gonvml.Initialize(); err != nil {
 		// This is under a logging level because otherwise we may cause
 		// log spam if the drivers/nvml is not installed on the system.
 		glog.V(4).Infof("Could not initialize NVML: %v", err)
 		return
 	}
+	nm.nvmlInitialized = true
 	numDevices, err := gonvml.DeviceCount()
 	if err != nil {
 		glog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err)
-		nm.Lock()
-		// Even though we won't have GPU metrics, the library was initialized and should be shutdown when exiting.
-		nm.nvmlInitialized = true
-		nm.Unlock()
 		return
 	}
 	glog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices)
@@ -122,10 +112,6 @@ func (nm *NvidiaManager) initializeNVML() {
 		}
 		nm.nvidiaDevices[int(minorNumber)] = device
 	}
-	nm.Lock()
-	// Doing this at the end to avoid race in accessing nvidiaDevices in GetCollector.
-	nm.nvmlInitialized = true
-	nm.Unlock()
 }
 
 // Destroy shuts down NVML.
@@ -139,12 +125,21 @@ func (nm *NvidiaManager) Destroy() {
 // present in the devices.list file in the given devicesCgroupPath.
 func (nm *NvidiaManager) GetCollector(devicesCgroupPath string) (AcceleratorCollector, error) {
 	nc := &NvidiaCollector{}
-	nm.RLock()
-	if !nm.nvmlInitialized || len(nm.nvidiaDevices) == 0 {
-		nm.RUnlock()
+
+	if !nm.devicesPresent {
 		return nc, nil
 	}
-	nm.RUnlock()
+	// Makes sure that we don't call initializeNVML() concurrently and
+	// that we only call initializeNVML() when it's not initialized.
+	nm.Lock()
+	if !nm.nvmlInitialized {
+		initializeNVML(nm)
+	}
+	if !nm.nvmlInitialized || len(nm.nvidiaDevices) == 0 {
+		nm.Unlock()
+		return nc, nil
+	}
+	nm.Unlock()
 	nvidiaMinorNumbers, err := parseDevicesCgroup(devicesCgroupPath)
 	if err != nil {
 		return nc, err
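Note: the hunks above drop the background goroutine that retried NVML initialization every minute in favor of initializing lazily, under a plain Mutex, the first time GetCollector runs. A minimal sketch of that guarded lazy-init pattern (type and field names are illustrative, not cadvisor's):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type lazyNVML struct {
    	mu          sync.Mutex
    	initialized bool
    	devices     map[int]string
    }

    // collector initializes at most once, and only when first asked for.
    // Unlike sync.Once, the explicit flag lets a failed init be retried on a
    // later call, which is why the diff re-checks nvmlInitialized under the lock.
    func (l *lazyNVML) collector() map[int]string {
    	l.mu.Lock()
    	defer l.mu.Unlock()
    	if !l.initialized {
    		l.devices = map[int]string{0: "nvidia0"} // stand-in for the gonvml setup
    		l.initialized = true
    	}
    	return l.devices
    }

    func main() {
    	l := &lazyNVML{}
    	fmt.Println(l.collector()) // init happens here, exactly once
    	fmt.Println(l.collector()) // reuses the cached map
    }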
7  vendor/github.com/google/cadvisor/cache/memory/memory.go (generated, vendored)
@@ -15,7 +15,7 @@
 package memory
 
 import (
-	"fmt"
+	"errors"
 	"sync"
 	"time"
 
@@ -26,6 +26,9 @@ import (
 	"github.com/golang/glog"
 )
 
+// ErrDataNotFound is the error resulting if failed to find a container in memory cache.
+var ErrDataNotFound = errors.New("unable to find data in memory cache")
+
 // TODO(vmarmol): See about refactoring this class, we have an unecessary redirection of containerCache and InMemoryCache.
 // containerCache is used to store per-container information
 type containerCache struct {
@@ -101,7 +104,7 @@ func (self *InMemoryCache) RecentStats(name string, start, end time.Time, maxSta
 		self.lock.RLock()
 		defer self.lock.RUnlock()
 		if cstore, ok = self.containerCacheMap[name]; !ok {
-			return fmt.Errorf("unable to find data for container %v", name)
+			return ErrDataNotFound
 		}
 		return nil
 	}()
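Note: swapping fmt.Errorf for the exported sentinel ErrDataNotFound lets callers tell "container not in cache" apart from real failures by comparing errors. A hedged sketch of a caller (this caller is not part of the diff; with 2018-era Go the comparison is ==, today it would be errors.Is):

    stats, err := memoryCache.RecentStats(name, start, end, maxStats)
    if err == memory.ErrDataNotFound {
    	// not cached yet: treat as no data rather than a hard error
    	stats = nil
    } else if err != nil {
    	return err
    }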
1  vendor/github.com/google/cadvisor/container/BUILD (generated, vendored)
@@ -32,6 +32,7 @@ filegroup(
         "//vendor/github.com/google/cadvisor/container/crio:all-srcs",
         "//vendor/github.com/google/cadvisor/container/docker:all-srcs",
         "//vendor/github.com/google/cadvisor/container/libcontainer:all-srcs",
+        "//vendor/github.com/google/cadvisor/container/mesos:all-srcs",
         "//vendor/github.com/google/cadvisor/container/raw:all-srcs",
         "//vendor/github.com/google/cadvisor/container/rkt:all-srcs",
         "//vendor/github.com/google/cadvisor/container/systemd:all-srcs",
1  vendor/github.com/google/cadvisor/container/container.go (generated, vendored)
@@ -36,6 +36,7 @@ const (
 	ContainerTypeSystemd
 	ContainerTypeCrio
 	ContainerTypeContainerd
+	ContainerTypeMesos
 )
 
 // Interface for container operation handlers.
10  vendor/github.com/google/cadvisor/container/containerd/factory.go (generated, vendored)
@@ -47,8 +47,8 @@ type containerdFactory struct {
 	// Information about the mounted cgroup subsystems.
 	cgroupSubsystems libcontainer.CgroupSubsystems
 	// Information about mounted filesystems.
-	fsInfo        fs.FsInfo
-	ignoreMetrics container.MetricSet
+	fsInfo          fs.FsInfo
+	includedMetrics container.MetricSet
 }
 
 func (self *containerdFactory) String() string {
@@ -70,7 +70,7 @@ func (self *containerdFactory) NewContainerHandler(name string, inHostNamespace
 		&self.cgroupSubsystems,
 		inHostNamespace,
 		metadataEnvs,
-		self.ignoreMetrics,
+		self.includedMetrics,
 	)
 }
 
@@ -117,7 +117,7 @@ func (self *containerdFactory) DebugInfo() map[string][]string {
 }
 
 // Register root container before running this function!
-func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	client, err := Client()
 	if err != nil {
 		return fmt.Errorf("unable to create containerd client: %v", err)
@@ -140,7 +140,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		fsInfo:             fsInfo,
 		machineInfoFactory: factory,
 		version:            containerdVersion,
-		ignoreMetrics:      ignoreMetrics,
+		includedMetrics:    includedMetrics,
 	}
 
 	container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw})
14  vendor/github.com/google/cadvisor/container/containerd/handler.go (generated, vendored)
@@ -48,7 +48,7 @@ type containerdContainerHandler struct {
 	// Image name used for this container.
 	image string
 	// Filesystem handler.
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 
 	libcontainerHandler *containerlibcontainer.Handler
 }
@@ -64,7 +64,7 @@ func newContainerdContainerHandler(
 	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
 	inHostNamespace bool,
 	metadataEnvs []string,
-	ignoreMetrics container.MetricSet,
+	includedMetrics container.MetricSet,
 ) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
 	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@@ -127,7 +127,7 @@ func newContainerdContainerHandler(
 		Aliases:   []string{id, name},
 	}
 
-	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), ignoreMetrics)
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), includedMetrics)
 
 	handler := &containerdContainerHandler{
 		machineInfoFactory: machineInfoFactory,
@@ -135,7 +135,7 @@ func newContainerdContainerHandler(
 		fsInfo:              fsInfo,
 		envs:                make(map[string]string),
 		labels:              cntr.Labels,
-		ignoreMetrics:       ignoreMetrics,
+		includedMetrics:     includedMetrics,
 		reference:           containerReference,
 		libcontainerHandler: libcontainerHandler,
 	}
@@ -159,9 +159,9 @@ func (self *containerdContainerHandler) ContainerReference() (info.ContainerRefe
 
 func (self *containerdContainerHandler) needNet() bool {
 	// Since containerd does not handle networking ideally we need to return based
-	// on ignoreMetrics list. Here the assumption is the presence of cri-containerd
+	// on includedMetrics list. Here the assumption is the presence of cri-containerd
 	// label
-	if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if self.includedMetrics.Has(container.NetworkUsageMetrics) {
 		//TODO change it to exported cri-containerd constants
 		return self.labels["io.cri-containerd.kind"] == "sandbox"
 	}
@@ -186,7 +186,7 @@ func (self *containerdContainerHandler) getFsStats(stats *info.ContainerStats) e
 		return err
 	}
 
-	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+	if self.includedMetrics.Has(container.DiskIOMetrics) {
 		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
 	}
 	return nil
8  vendor/github.com/google/cadvisor/container/crio/factory.go (generated, vendored)
@@ -55,7 +55,7 @@ type crioFactory struct {
 	// Information about mounted filesystems.
 	fsInfo fs.FsInfo
 
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 
 	client crioClient
 }
@@ -81,7 +81,7 @@ func (self *crioFactory) NewContainerHandler(name string, inHostNamespace bool)
 		&self.cgroupSubsystems,
 		inHostNamespace,
 		metadataEnvs,
-		self.ignoreMetrics,
+		self.includedMetrics,
 	)
 	return
 }
@@ -136,7 +136,7 @@ var (
 )
 
 // Register root container before running this function!
-func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	client, err := Client()
 	if err != nil {
 		return err
@@ -162,7 +162,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		machineInfoFactory: factory,
 		storageDriver:      storageDriver(info.StorageDriver),
 		storageDir:         info.StorageRoot,
-		ignoreMetrics:      ignoreMetrics,
+		includedMetrics:    includedMetrics,
 	}
 
 	container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw})
18  vendor/github.com/google/cadvisor/container/crio/handler.go (generated, vendored)
@@ -63,7 +63,7 @@ type crioContainerHandler struct {
 	// The IP address of the container
 	ipAddress string
 
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 
 	reference info.ContainerReference
 
@@ -83,7 +83,7 @@ func newCrioContainerHandler(
 	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
 	inHostNamespace bool,
 	metadataEnvs []string,
-	ignoreMetrics container.MetricSet,
+	includedMetrics container.MetricSet,
 ) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
 	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@@ -141,7 +141,7 @@ func newCrioContainerHandler(
 		Namespace: CrioNamespace,
 	}
 
-	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, ignoreMetrics)
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, includedMetrics)
 
 	// TODO: extract object mother method
 	handler := &crioContainerHandler{
@@ -152,7 +152,7 @@ func newCrioContainerHandler(
 		rootfsStorageDir:    rootfsStorageDir,
 		envs:                make(map[string]string),
 		labels:              cInfo.Labels,
-		ignoreMetrics:       ignoreMetrics,
+		includedMetrics:     includedMetrics,
 		reference:           containerReference,
 		libcontainerHandler: libcontainerHandler,
 	}
@@ -171,7 +171,7 @@ func newCrioContainerHandler(
 	handler.ipAddress = cInfo.IP
 
 	// we optionally collect disk usage metrics
-	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if includedMetrics.Has(container.DiskUsageMetrics) {
 		handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, storageLogDir, fsInfo)
 	}
 	// TODO for env vars we wanted to show from container.Config.Env from whitelist
@@ -199,14 +199,14 @@ func (self *crioContainerHandler) ContainerReference() (info.ContainerReference,
 }
 
 func (self *crioContainerHandler) needNet() bool {
-	if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if self.includedMetrics.Has(container.NetworkUsageMetrics) {
 		return self.labels["io.kubernetes.container.name"] == "POD"
 	}
 	return false
 }
 
 func (self *crioContainerHandler) GetSpec() (info.ContainerSpec, error) {
-	hasFilesystem := !self.ignoreMetrics.Has(container.DiskUsageMetrics)
+	hasFilesystem := self.includedMetrics.Has(container.DiskUsageMetrics)
 	spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)
 
 	spec.Labels = self.labels
@@ -222,11 +222,11 @@ func (self *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
 		return err
 	}
 
-	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+	if self.includedMetrics.Has(container.DiskIOMetrics) {
 		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
 	}
 
-	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if !self.includedMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
 	var device string
8  vendor/github.com/google/cadvisor/container/docker/factory.go (generated, vendored)
@@ -110,7 +110,7 @@ type dockerFactory struct {
 
 	dockerAPIVersion []int
 
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 
 	thinPoolName    string
 	thinPoolWatcher *devicemapper.ThinPoolWatcher
@@ -141,7 +141,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
 		inHostNamespace,
 		metadataEnvs,
 		self.dockerVersion,
-		self.ignoreMetrics,
+		self.includedMetrics,
 		self.thinPoolName,
 		self.thinPoolWatcher,
 		self.zfsWatcher,
@@ -309,7 +309,7 @@ func ensureThinLsKernelVersion(kernelVersion string) error {
 }
 
 // Register root container before running this function!
-func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	client, err := Client()
 	if err != nil {
 		return fmt.Errorf("unable to communicate with docker daemon: %v", err)
@@ -363,7 +363,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		machineInfoFactory: factory,
 		storageDriver:      storageDriver(dockerInfo.Driver),
 		storageDir:         RootDir(),
-		ignoreMetrics:      ignoreMetrics,
+		includedMetrics:    includedMetrics,
 		thinPoolName:       thinPoolName,
 		thinPoolWatcher:    thinPoolWatcher,
 		zfsWatcher:         zfsWatcher,
18  vendor/github.com/google/cadvisor/container/docker/handler.go (generated, vendored)
@@ -83,7 +83,7 @@ type dockerContainerHandler struct {
 	// The IP address of the container
 	ipAddress string
 
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 
 	// the devicemapper poolname
 	poolName string
@@ -128,7 +128,7 @@ func newDockerContainerHandler(
 	inHostNamespace bool,
 	metadataEnvs []string,
 	dockerVersion []int,
-	ignoreMetrics container.MetricSet,
+	includedMetrics container.MetricSet,
 	thinPoolName string,
 	thinPoolWatcher *devicemapper.ThinPoolWatcher,
 	zfsWatcher *zfs.ZfsWatcher,
@@ -203,7 +203,7 @@ func newDockerContainerHandler(
 		rootfsStorageDir: rootfsStorageDir,
 		envs:             make(map[string]string),
 		labels:           ctnr.Config.Labels,
-		ignoreMetrics:    ignoreMetrics,
+		includedMetrics:  includedMetrics,
 		zfsParent:        zfsParent,
 	}
 	// Timestamp returned by Docker is in time.RFC3339Nano format.
@@ -212,7 +212,7 @@ func newDockerContainerHandler(
 		// This should not happen, report the error just in case
 		return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err)
 	}
-	handler.libcontainerHandler = containerlibcontainer.NewHandler(cgroupManager, rootFs, ctnr.State.Pid, ignoreMetrics)
+	handler.libcontainerHandler = containerlibcontainer.NewHandler(cgroupManager, rootFs, ctnr.State.Pid, includedMetrics)
 
 	// Add the name and bare ID as aliases of the container.
 	handler.reference = info.ContainerReference{
@@ -244,7 +244,7 @@ func newDockerContainerHandler(
 
 	handler.ipAddress = ipAddress
 
-	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if includedMetrics.Has(container.DiskUsageMetrics) {
 		handler.fsHandler = &dockerFsHandler{
 			fsHandler:       common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, otherStorageDir, fsInfo),
 			thinPoolWatcher: thinPoolWatcher,
@@ -345,14 +345,14 @@ func (self *dockerContainerHandler) ContainerReference() (info.ContainerReferenc
 }
 
 func (self *dockerContainerHandler) needNet() bool {
-	if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if self.includedMetrics.Has(container.NetworkUsageMetrics) {
 		return !self.networkMode.IsContainer()
 	}
 	return false
 }
 
 func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
-	hasFilesystem := !self.ignoreMetrics.Has(container.DiskUsageMetrics)
+	hasFilesystem := self.includedMetrics.Has(container.DiskUsageMetrics)
 	spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)
 
 	spec.Labels = self.labels
@@ -369,11 +369,11 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return err
 	}
 
-	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+	if self.includedMetrics.Has(container.DiskIOMetrics) {
 		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
 	}
 
-	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if !self.includedMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
 	var device string
1  vendor/github.com/google/cadvisor/container/factory.go (generated, vendored)
@@ -51,6 +51,7 @@ const (
 	NetworkUsageMetrics     MetricKind = "network"
 	NetworkTcpUsageMetrics  MetricKind = "tcp"
 	NetworkUdpUsageMetrics  MetricKind = "udp"
+	AcceleratorUsageMetrics MetricKind = "accelerator"
 	AppMetrics              MetricKind = "app"
 )
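Note: the new accelerator metric kind rides along with the larger theme of this bump: cadvisor's metric selection flips from a deny-list (ignoreMetrics) to an allow-list (includedMetrics), which is why every !set.Has(...) check in the handlers becomes set.Has(...). MetricSet is a set keyed by MetricKind (the map form, map[container.MetricKind]struct{}, is visible in this diff's mesosFactory and rawFactory fields); a sketch of the semantics, not cadvisor's exact source:

    package metricset

    type MetricKind string

    type MetricSet map[MetricKind]struct{}

    func (ms MetricSet) Has(mk MetricKind) bool {
    	_, exists := ms[mk]
    	return exists
    }

    func (ms MetricSet) Add(mk MetricKind) {
    	ms[mk] = struct{}{}
    }

    // Callers now declare what to collect rather than what to skip:
    //   included := MetricSet{}
    //   included.Add(MetricKind("network"))
    //   included.Has(MetricKind("accelerator")) // false unless explicitly added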
21  vendor/github.com/google/cadvisor/container/libcontainer/handler.go (generated, vendored)
@@ -43,16 +43,16 @@ type Handler struct {
 	cgroupManager   cgroups.Manager
 	rootFs          string
 	pid             int
-	ignoreMetrics   container.MetricSet
+	includedMetrics container.MetricSet
 	pidMetricsCache map[int]*info.CpuSchedstat
 }
 
-func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) *Handler {
+func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, includedMetrics container.MetricSet) *Handler {
 	return &Handler{
 		cgroupManager:   cgroupManager,
 		rootFs:          rootFs,
 		pid:             pid,
-		ignoreMetrics:   ignoreMetrics,
+		includedMetrics: includedMetrics,
 		pidMetricsCache: make(map[int]*info.CpuSchedstat),
 	}
 }
@@ -66,10 +66,10 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 	libcontainerStats := &libcontainer.Stats{
 		CgroupStats: cgroupStats,
 	}
-	withPerCPU := !h.ignoreMetrics.Has(container.PerCpuUsageMetrics)
+	withPerCPU := h.includedMetrics.Has(container.PerCpuUsageMetrics)
 	stats := newContainerStats(libcontainerStats, withPerCPU)
 
-	if !h.ignoreMetrics.Has(container.ProcessSchedulerMetrics) {
+	if h.includedMetrics.Has(container.ProcessSchedulerMetrics) {
 		pids, err := h.cgroupManager.GetAllPids()
 		if err != nil {
 			glog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
@@ -85,7 +85,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 	if h.pid == 0 {
 		return stats, nil
 	}
-	if !h.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkUsageMetrics) {
 		netStats, err := networkStatsFromProc(h.rootFs, h.pid)
 		if err != nil {
 			glog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
@@ -93,7 +93,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 			stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
 		}
 	}
-	if !h.ignoreMetrics.Has(container.NetworkTcpUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkTcpUsageMetrics) {
 		t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
 		if err != nil {
 			glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
@@ -108,7 +108,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 			stats.Network.Tcp6 = t6
 		}
 	}
-	if !h.ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkUdpUsageMetrics) {
 		u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
 		if err != nil {
 			glog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
@@ -498,14 +498,17 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.Usage = s.MemoryStats.Usage.Usage
 	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
 	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
-	ret.Memory.Cache = s.MemoryStats.Stats["cache"]
 	if s.MemoryStats.UseHierarchy {
+		ret.Memory.Cache = s.MemoryStats.Stats["total_cache"]
 		ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
 		ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
+		ret.Memory.MappedFile = s.MemoryStats.Stats["total_mapped_file"]
 	} else {
+		ret.Memory.Cache = s.MemoryStats.Stats["cache"]
 		ret.Memory.RSS = s.MemoryStats.Stats["rss"]
 		ret.Memory.Swap = s.MemoryStats.Stats["swap"]
+		ret.Memory.MappedFile = s.MemoryStats.Stats["mapped_file"]
 	}
 	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
 		ret.Memory.ContainerData.Pgfault = v
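Note: the setMemoryStats hunk extends the use_hierarchy handling to cache and mapped_file: with hierarchy enabled, the total_* counters in memory.stat aggregate the cgroup and all of its descendants, while the bare counters cover the cgroup alone. A small sketch of the key-selection rule being applied (the helper name is mine, not cadvisor's):

    // memoryStatKey picks the hierarchical counter when use_hierarchy=1.
    func memoryStatKey(useHierarchy bool, key string) string {
    	if useHierarchy {
    		return "total_" + key // includes descendant cgroups
    	}
    	return key
    }

    // e.g. cache := s.MemoryStats.Stats[memoryStatKey(s.MemoryStats.UseHierarchy, "cache")]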
47  vendor/github.com/google/cadvisor/container/mesos/BUILD (generated, vendored, new file)
@@ -0,0 +1,47 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "client.go",
        "factory.go",
        "handler.go",
        "mesos_agent.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/github.com/google/cadvisor/container/mesos",
    importpath = "github.com/google/cadvisor/container/mesos",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/Rican7/retry:go_default_library",
        "//vendor/github.com/Rican7/retry/strategy:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/google/cadvisor/container:go_default_library",
        "//vendor/github.com/google/cadvisor/container/common:go_default_library",
        "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
        "//vendor/github.com/google/cadvisor/fs:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
        "//vendor/github.com/mesos/mesos-go/api/v1/lib:go_default_library",
        "//vendor/github.com/mesos/mesos-go/api/v1/lib/agent:go_default_library",
        "//vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls:go_default_library",
        "//vendor/github.com/mesos/mesos-go/api/v1/lib/client:go_default_library",
        "//vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/codecs:go_default_library",
        "//vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
190  vendor/github.com/google/cadvisor/container/mesos/client.go (generated, vendored, new file)
@@ -0,0 +1,190 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mesos

import (
	"fmt"
	"github.com/Rican7/retry"
	"github.com/Rican7/retry/strategy"
	"github.com/mesos/mesos-go/api/v1/lib"
	"github.com/mesos/mesos-go/api/v1/lib/agent"
	"github.com/mesos/mesos-go/api/v1/lib/agent/calls"
	mclient "github.com/mesos/mesos-go/api/v1/lib/client"
	"github.com/mesos/mesos-go/api/v1/lib/encoding/codecs"
	"github.com/mesos/mesos-go/api/v1/lib/httpcli"
	"net/url"
	"sync"
)

const (
	maxRetryAttempts = 3
	invalidPID       = -1
)

var (
	mesosClientOnce sync.Once
	mesosClient     *client
)

type client struct {
	hc *httpcli.Client
}

type mesosAgentClient interface {
	ContainerInfo(id string) (*containerInfo, error)
	ContainerPid(id string) (int, error)
}

type containerInfo struct {
	cntr   *mContainer
	labels map[string]string
}

// Client is an interface to query mesos agent http endpoints
func Client() (mesosAgentClient, error) {
	mesosClientOnce.Do(func() {
		// Start Client
		apiURL := url.URL{
			Scheme: "http",
			Host:   *MesosAgentAddress,
			Path:   "/api/v1",
		}

		mesosClient = &client{
			hc: httpcli.New(
				httpcli.Endpoint(apiURL.String()),
				httpcli.Codec(codecs.ByMediaType[codecs.MediaTypeProtobuf]),
				httpcli.Do(httpcli.With(httpcli.Timeout(*MesosAgentTimeout))),
			),
		}
	})
	return mesosClient, nil
}

// ContainerInfo returns the container information of the given container id
func (self *client) ContainerInfo(id string) (*containerInfo, error) {
	c, err := self.getContainer(id)
	if err != nil {
		return nil, err
	}

	// Get labels of the container
	l, err := self.getLabels(c)
	if err != nil {
		return nil, err
	}

	return &containerInfo{
		cntr:   c,
		labels: l,
	}, nil
}

// Get the Pid of the container
func (self *client) ContainerPid(id string) (int, error) {
	var pid int
	var err error
	err = retry.Retry(
		func(attempt uint) error {
			c, err := self.ContainerInfo(id)
			if err != nil {
				return err
			}

			if c.cntr.ContainerStatus != nil {
				pid = int(*c.cntr.ContainerStatus.ExecutorPID)
			} else {
				err = fmt.Errorf("error fetching Pid")
			}
			return err
		},
		strategy.Limit(maxRetryAttempts),
	)
	if err != nil {
		return invalidPID, fmt.Errorf("failed to fetch pid")
	}
	return pid, err
}

func (self *client) getContainer(id string) (*mContainer, error) {
	// Get all containers
	cntrs, err := self.getContainers()
	if err != nil {
		return nil, err
	}

	// Check if there is a container with given id and return the container
	for _, c := range cntrs.Containers {
		if c.ContainerID.Value == id {
			return &c, nil
		}
	}
	return nil, fmt.Errorf("can't locate container %s", id)
}

func (self *client) getContainers() (mContainers, error) {
	req := calls.NonStreaming(calls.GetContainers())
	result, err := self.fetchAndDecode(req)
	if err != nil {
		return nil, fmt.Errorf("failed to get mesos containers: %v", err)
	}
	cntrs := result.GetContainers
	return cntrs, nil
}

func (self *client) getLabels(c *mContainer) (map[string]string, error) {
	// Get mesos agent state which contains all containers labels
	var s state
	req := calls.NonStreaming(calls.GetState())
	result, err := self.fetchAndDecode(req)
	if err != nil {
		return map[string]string{}, fmt.Errorf("failed to get mesos agent state: %v", err)
	}
	s.st = result.GetState

	// Fetch labels from state object
	labels, err := s.FetchLabels(c.FrameworkID.Value, c.ExecutorID.Value)
	if err != nil {
		return labels, fmt.Errorf("error while fetching labels from executor: %v", err)
	}

	return labels, nil
}

func (self *client) fetchAndDecode(req calls.RequestFunc) (*agent.Response, error) {
	var res mesos.Response
	var err error

	// Send request
	err = retry.Retry(
		func(attempt uint) error {
			res, err = mesosClient.hc.Send(req, mclient.ResponseClassSingleton, nil)
			return err
		},
		strategy.Limit(maxRetryAttempts),
	)
	if err != nil {
		return nil, fmt.Errorf("error fetching %s: %s", req.Call(), err)
	}

	// Decode the result
	var target agent.Response
	err = res.Decode(&target)
	if err != nil {
		return nil, fmt.Errorf("error while decoding response body from %s: %s", res, err)
	}

	return &target, nil
}
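Note: both ContainerPid and fetchAndDecode above wrap their agent calls in retry.Retry from github.com/Rican7/retry, bounded by strategy.Limit(maxRetryAttempts). A self-contained usage sketch of that pattern (the flaky operation here is hypothetical):

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/Rican7/retry"
    	"github.com/Rican7/retry/strategy"
    )

    func main() {
    	calls := 0
    	err := retry.Retry(
    		func(attempt uint) error {
    			calls++
    			if calls < 3 {
    				return errors.New("transient failure") // pretend the agent flaked
    			}
    			return nil
    		},
    		strategy.Limit(3), // same cap as maxRetryAttempts above
    	)
    	fmt.Println(err, calls) // <nil> 3
    }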
148  vendor/github.com/google/cadvisor/container/mesos/factory.go (generated, vendored, new file)
@@ -0,0 +1,148 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mesos

import (
	"flag"
	"fmt"
	"path"
	"regexp"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/google/cadvisor/container"
	"github.com/google/cadvisor/container/libcontainer"
	"github.com/google/cadvisor/fs"
	info "github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/manager/watcher"
)

var MesosAgentAddress = flag.String("mesos_agent", "127.0.0.1:5051", "Mesos agent address")
var MesosAgentTimeout = flag.Duration("mesos_agent_timeout", 10*time.Second, "Mesos agent timeout")

// The namespace under which mesos aliases are unique.
const MesosNamespace = "mesos"

// Regexp that identifies mesos cgroups, containers started with
// --cgroup-parent have another prefix than 'mesos'
var mesosCgroupRegexp = regexp.MustCompile(`([a-z-0-9]{36})`)

// mesosFactory implements the interface ContainerHandlerFactory
type mesosFactory struct {
	machineInfoFactory info.MachineInfoFactory

	// Information about the cgroup subsystems.
	cgroupSubsystems libcontainer.CgroupSubsystems

	// Information about mounted filesystems.
	fsInfo fs.FsInfo

	includedMetrics map[container.MetricKind]struct{}

	client mesosAgentClient
}

func (self *mesosFactory) String() string {
	return MesosNamespace
}

func (self *mesosFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
	client, err := Client()
	if err != nil {
		return nil, err
	}

	return newMesosContainerHandler(
		name,
		&self.cgroupSubsystems,
		self.machineInfoFactory,
		self.fsInfo,
		self.includedMetrics,
		inHostNamespace,
		client,
	)
}

// ContainerNameToMesosId returns the Mesos ID from the full container name.
func ContainerNameToMesosId(name string) string {
	id := path.Base(name)

	if matches := mesosCgroupRegexp.FindStringSubmatch(id); matches != nil {
		return matches[1]
	}

	return id
}

// isContainerName returns true if the cgroup with associated name
// corresponds to a mesos container.
func isContainerName(name string) bool {
	// always ignore .mount cgroup even if associated with mesos and delegate to systemd
	if strings.HasSuffix(name, ".mount") {
		return false
	}
	return mesosCgroupRegexp.MatchString(path.Base(name))
}

// The mesos factory can handle any container.
func (self *mesosFactory) CanHandleAndAccept(name string) (handle bool, accept bool, err error) {
	// if the container is not associated with mesos, we can't handle it or accept it.
	if !isContainerName(name) {
		return false, false, nil
	}

	// Check if the container is known to mesos and it is active.
	id := ContainerNameToMesosId(name)

	_, err = self.client.ContainerInfo(id)
	if err != nil {
		return false, true, fmt.Errorf("error getting running container: %v", err)
	}

	return true, true, nil
}

func (self *mesosFactory) DebugInfo() map[string][]string {
	return map[string][]string{}
}

func Register(
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	includedMetrics container.MetricSet,
) error {
	client, err := Client()

	if err != nil {
		return fmt.Errorf("unable to create mesos agent client: %v", err)
	}

	cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
	if err != nil {
		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
	}

	glog.V(1).Infof("Registering mesos factory")
	factory := &mesosFactory{
		machineInfoFactory: machineInfoFactory,
		cgroupSubsystems:   cgroupSubsystems,
		fsInfo:             fsInfo,
		includedMetrics:    includedMetrics,
		client:             client,
	}
	container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw})
	return nil
}
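Note: ContainerNameToMesosId works because mesosCgroupRegexp matches the 36-character container UUID inside the final cgroup path segment, so it covers both the default /mesos/<uuid> hierarchy and custom --cgroup-parent prefixes. A quick illustration (the cgroup paths are made-up examples, assumed to run where fmt is imported):

    // "/mesos/1fd96495-dc7d-4fcc-8c58-71a5b09b7b11" -> "1fd96495-dc7d-4fcc-8c58-71a5b09b7b11"
    fmt.Println(ContainerNameToMesosId("/mesos/1fd96495-dc7d-4fcc-8c58-71a5b09b7b11"))

    // Names without a UUID-shaped segment fall through unchanged: prints "foo".
    fmt.Println(ContainerNameToMesosId("/system.slice/foo"))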
213  vendor/github.com/google/cadvisor/container/mesos/handler.go (generated, vendored, new file)
@@ -0,0 +1,213 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Handler for "mesos" containers.
package mesos

import (
	"fmt"
	"path"

	"github.com/google/cadvisor/container"
	"github.com/google/cadvisor/container/common"
	containerlibcontainer "github.com/google/cadvisor/container/libcontainer"
	"github.com/google/cadvisor/fs"
	info "github.com/google/cadvisor/info/v1"

	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
)

type mesosContainerHandler struct {
	// Name of the container for this handler.
	name string

	// machineInfoFactory provides info.MachineInfo
	machineInfoFactory info.MachineInfoFactory

	// Absolute path to the cgroup hierarchies of this container.
	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
	cgroupPaths map[string]string

	// File System Info
	fsInfo fs.FsInfo

	// Metrics to be included.
	includedMetrics container.MetricSet

	labels map[string]string

	// Reference to the container
	reference info.ContainerReference

	libcontainerHandler *containerlibcontainer.Handler
}

func isRootCgroup(name string) bool {
	return name == "/"
}

func newMesosContainerHandler(
	name string,
	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	includedMetrics container.MetricSet,
	inHostNamespace bool,
	client mesosAgentClient,
) (container.ContainerHandler, error) {
	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)
	for key, val := range cgroupSubsystems.MountPoints {
		cgroupPaths[key] = path.Join(val, name)
	}

	// Generate the equivalent cgroup manager for this container.
	cgroupManager := &cgroupfs.Manager{
		Cgroups: &libcontainerconfigs.Cgroup{
			Name: name,
		},
		Paths: cgroupPaths,
	}

	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
	}

	id := ContainerNameToMesosId(name)

	cinfo, err := client.ContainerInfo(id)

	if err != nil {
		return nil, err
	}

	labels := cinfo.labels
	pid, err := client.ContainerPid(id)
	if err != nil {
		return nil, err
	}

	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics)

	reference := info.ContainerReference{
		Id:        id,
		Name:      name,
		Namespace: MesosNamespace,
		Aliases:   []string{id, name},
	}

	handler := &mesosContainerHandler{
		name:                name,
		machineInfoFactory:  machineInfoFactory,
		cgroupPaths:         cgroupPaths,
		fsInfo:              fsInfo,
		includedMetrics:     includedMetrics,
		labels:              labels,
		reference:           reference,
		libcontainerHandler: libcontainerHandler,
	}

	return handler, nil
}

func (self *mesosContainerHandler) ContainerReference() (info.ContainerReference, error) {
	// We only know the container by its one name.
	return self.reference, nil
}

// Nothing to start up.
func (self *mesosContainerHandler) Start() {}

// Nothing to clean up.
func (self *mesosContainerHandler) Cleanup() {}

func (self *mesosContainerHandler) GetSpec() (info.ContainerSpec, error) {
	// TODO: Since we dont collect disk usage and network stats for mesos containers, we set
	// hasFilesystem and hasNetwork to false. Revisit when we support disk usage, network
	// stats for mesos containers.
	hasNetwork := false
	hasFilesystem := false

	spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, hasNetwork, hasFilesystem)
	if err != nil {
		return spec, err
	}

	spec.Labels = self.labels

	return spec, nil
}

func (self *mesosContainerHandler) getFsStats(stats *info.ContainerStats) error {

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	if self.includedMetrics.Has(container.DiskIOMetrics) {
		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
	}

	return nil
}

func (self *mesosContainerHandler) GetStats() (*info.ContainerStats, error) {
	stats, err := self.libcontainerHandler.GetStats()
	if err != nil {
		return stats, err
	}

	// Get filesystem stats.
	err = self.getFsStats(stats)
	if err != nil {
		return stats, err
	}

	return stats, nil
}

func (self *mesosContainerHandler) GetCgroupPath(resource string) (string, error) {
	path, ok := self.cgroupPaths[resource]
	if !ok {
		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
	}
	return path, nil
}

func (self *mesosContainerHandler) GetContainerLabels() map[string]string {
	return self.labels
}

func (self *mesosContainerHandler) GetContainerIPAddress() string {
	// the IP address for the mesos container corresponds to the system ip address.
	return "127.0.0.1"
}

func (self *mesosContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
	return common.ListContainers(self.name, self.cgroupPaths, listType)
}

func (self *mesosContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
	return self.libcontainerHandler.GetProcesses()
}

func (self *mesosContainerHandler) Exists() bool {
	return common.CgroupExists(self.cgroupPaths)
}

func (self *mesosContainerHandler) Type() container.ContainerType {
	return container.ContainerTypeMesos
}
147  vendor/github.com/google/cadvisor/container/mesos/mesos_agent.go (generated, vendored, new file)
@@ -0,0 +1,147 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mesos

import (
	"fmt"
	"github.com/mesos/mesos-go/api/v1/lib"
	"github.com/mesos/mesos-go/api/v1/lib/agent"
)

const (
	cpus         = "cpus"
	schedulerSLA = "scheduler_sla"
	framework    = "framework"
	source       = "source"
	revocable    = "revocable"
	nonRevocable = "non_revocable"
)

type mContainers *agent.Response_GetContainers
type mContainer = agent.Response_GetContainers_Container

type (
	state struct {
		st *agent.Response_GetState
	}
)

// GetFramework finds a framework with the given id and returns nil if not found. Note that
// this is different from the framework name.
func (s *state) GetFramework(id string) (*mesos.FrameworkInfo, error) {
	for _, fw := range s.st.GetFrameworks.Frameworks {
		if fw.FrameworkInfo.ID.Value == id {
			return &fw.FrameworkInfo, nil
		}
	}
	return nil, fmt.Errorf("unable to find framework id %s", id)
}

// GetExecutor finds an executor with the given ID and returns nil if not found. Note that
// this is different from the executor name.
func (s *state) GetExecutor(id string) (*mesos.ExecutorInfo, error) {
	for _, exec := range s.st.GetExecutors.Executors {
		if exec.ExecutorInfo.ExecutorID.Value == id {
			return &exec.ExecutorInfo, nil
		}
	}
	return nil, fmt.Errorf("unable to find executor with id %s", id)
}

// GetTask returns a task launched by given executor.
func (s *state) GetTask(exID string) (*mesos.Task, error) {
	// Check if task is in Launched Tasks list
	for _, t := range s.st.GetTasks.LaunchedTasks {
		if s.isMatchingTask(&t, exID) {
			return &t, nil
		}
	}

	// Check if task is in Queued Tasks list
	for _, t := range s.st.GetTasks.QueuedTasks {
		if s.isMatchingTask(&t, exID) {
			return &t, nil
		}
	}
	return nil, fmt.Errorf("unable to find task matching executor id %s", exID)
}

func (s *state) isMatchingTask(t *mesos.Task, exID string) bool {
	// MESOS-9111: For tasks launched through mesos command/default executor, the
	// executorID(which is same as the taskID) field is not filled in the TaskInfo object.
	// The workaround is compare with taskID field if executorID is empty
	if t.ExecutorID != nil {
		if t.ExecutorID.Value == exID {
			return true
		}
	} else {
		if t.TaskID.Value == exID {
			return true
		}
	}

	return false
}

func (s *state) fetchLabelsFromTask(exID string, labels map[string]string) error {
	t, err := s.GetTask(exID)
	if err != nil {
		return err
	}

	// Identify revocability. Can be removed once we have a proper label
	for _, resource := range t.Resources {
		if resource.Name == cpus {
			if resource.Revocable != nil {
				labels[schedulerSLA] = revocable
			} else {
				labels[schedulerSLA] = nonRevocable
			}
			break
		}
	}

	for _, l := range t.Labels.Labels {
		labels[l.Key] = *l.Value
	}

	return nil
}

func (s *state) FetchLabels(fwID string, exID string) (map[string]string, error) {
	labels := make(map[string]string)

	// Look for the framework which launched the container.
	fw, err := s.GetFramework(fwID)
	if err != nil {
		return labels, fmt.Errorf("framework ID %q not found: %v", fwID, err)
	}
	labels[framework] = fw.Name

	// Get the executor info of the container which contains all the task info.
	exec, err := s.GetExecutor(exID)
	if err != nil {
		return labels, fmt.Errorf("executor ID %q not found: %v", exID, err)
	}

	labels[source] = *exec.Source

	err = s.fetchLabelsFromTask(exID, labels)
	if err != nil {
		return labels, fmt.Errorf("failed to fetch labels from task with executor ID %s", exID)
	}

	return labels, nil
}
22  vendor/github.com/google/cadvisor/container/raw/factory.go (generated, vendored)
@@ -17,6 +17,7 @@ package raw
 import (
 	"flag"
 	"fmt"
+	"strings"
 
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/common"
@@ -43,8 +44,11 @@ type rawFactory struct {
 	// Watcher for inotify events.
 	watcher *common.InotifyWatcher
 
-	// List of metrics to be ignored.
-	ignoreMetrics map[container.MetricKind]struct{}
+	// List of metrics to be included.
+	includedMetrics map[container.MetricKind]struct{}
+
+	// List of raw container cgroup path prefix whitelist.
+	rawPrefixWhiteList []string
 }
 
 func (self *rawFactory) String() string {
@@ -56,12 +60,19 @@ func (self *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (
 	if !inHostNamespace {
 		rootFs = "/rootfs"
 	}
-	return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs, self.ignoreMetrics)
+	return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs, self.includedMetrics)
 }
 
 // The raw factory can handle any container. If --docker_only is set to false, non-docker containers are ignored.
 func (self *rawFactory) CanHandleAndAccept(name string) (bool, bool, error) {
 	accept := name == "/" || !*dockerOnly
+
+	for _, prefix := range self.rawPrefixWhiteList {
+		if strings.HasPrefix(name, prefix) {
+			accept = true
+			break
+		}
+	}
 	return true, accept, nil
 }
 
@@ -69,7 +80,7 @@ func (self *rawFactory) DebugInfo() map[string][]string {
 	return common.DebugInfo(self.watcher.GetWatches())
 }
 
-func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics map[container.MetricKind]struct{}) error {
+func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics map[container.MetricKind]struct{}, rawPrefixWhiteList []string) error {
 	cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
 	if err != nil {
 		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
@@ -89,7 +100,8 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno
 		fsInfo:             fsInfo,
 		cgroupSubsystems:   &cgroupSubsystems,
 		watcher:            watcher,
-		ignoreMetrics:      ignoreMetrics,
+		includedMetrics:    includedMetrics,
+		rawPrefixWhiteList: rawPrefixWhiteList,
 	}
 	container.RegisterContainerHandlerFactory(factory, []watch.ContainerWatchSource{watch.Raw})
 	return nil
4  vendor/github.com/google/cadvisor/container/raw/handler.go (generated, vendored)
@@ -49,7 +49,7 @@ func isRootCgroup(name string) bool {
 	return name == "/"
 }
 
-func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
+func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) {
 	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)
 
 	cHints, err := common.GetContainerHintsFromFile(*common.ArgContainerHints)
@@ -78,7 +78,7 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
 		pid = 1
 	}
 
-	handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)
+	handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics)
 
 	return &rawContainerHandler{
 		name: name,
8  vendor/github.com/google/cadvisor/container/rkt/factory.go (generated, vendored)
@@ -35,7 +35,7 @@ type rktFactory struct {
 
 	fsInfo fs.FsInfo
 
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 
 	rktPath string
 }
@@ -54,7 +54,7 @@ func (self *rktFactory) NewContainerHandler(name string, inHostNamespace bool) (
 	if !inHostNamespace {
 		rootFs = "/rootfs"
 	}
-	return newRktContainerHandler(name, client, self.rktPath, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, rootFs, self.ignoreMetrics)
+	return newRktContainerHandler(name, client, self.rktPath, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, rootFs, self.includedMetrics)
 }
 
 func (self *rktFactory) CanHandleAndAccept(name string) (bool, bool, error) {
@@ -67,7 +67,7 @@ func (self *rktFactory) DebugInfo() map[string][]string {
 	return map[string][]string{}
 }
 
-func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	_, err := Client()
 	if err != nil {
 		return fmt.Errorf("unable to communicate with Rkt api service: %v", err)
@@ -91,7 +91,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno
 		machineInfoFactory: machineInfoFactory,
 		fsInfo:             fsInfo,
 		cgroupSubsystems:   &cgroupSubsystems,
-		ignoreMetrics:      ignoreMetrics,
+		includedMetrics:    includedMetrics,
 		rktPath:            rktPath,
 	}
 	container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Rkt})
18
vendor/github.com/google/cadvisor/container/rkt/handler.go
generated
vendored
@@ -48,7 +48,7 @@ type rktContainerHandler struct {
    // Filesystem handler.
    fsHandler common.FsHandler

    ignoreMetrics container.MetricSet
    includedMetrics container.MetricSet

    apiPod *rktapi.Pod

@@ -59,7 +59,7 @@ type rktContainerHandler struct {
    libcontainerHandler *libcontainer.Handler
}

func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPath string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPath string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) {
    aliases := make([]string, 1)
    isPod := false

@@ -109,7 +109,7 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
        Paths: cgroupPaths,
    }

    libcontainerHandler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)
    libcontainerHandler := libcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics)

    rootfsStorageDir := getRootFs(rktPath, parsed)

@@ -125,14 +125,14 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
        fsInfo:              fsInfo,
        isPod:               isPod,
        rootfsStorageDir:    rootfsStorageDir,
        ignoreMetrics:       ignoreMetrics,
        includedMetrics:     includedMetrics,
        apiPod:              apiPod,
        labels:              labels,
        reference:           containerReference,
        libcontainerHandler: libcontainerHandler,
    }

    if !ignoreMetrics.Has(container.DiskUsageMetrics) {
    if includedMetrics.Has(container.DiskUsageMetrics) {
        handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, "", fsInfo)
    }

@@ -170,8 +170,8 @@ func (handler *rktContainerHandler) Cleanup() {
}

func (handler *rktContainerHandler) GetSpec() (info.ContainerSpec, error) {
    hasNetwork := handler.isPod && !handler.ignoreMetrics.Has(container.NetworkUsageMetrics)
    hasFilesystem := !handler.ignoreMetrics.Has(container.DiskUsageMetrics)
    hasNetwork := handler.isPod && handler.includedMetrics.Has(container.NetworkUsageMetrics)
    hasFilesystem := handler.includedMetrics.Has(container.DiskUsageMetrics)

    spec, err := common.GetSpec(handler.cgroupPaths, handler.machineInfoFactory, hasNetwork, hasFilesystem)

@@ -186,11 +186,11 @@ func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error
        return err
    }

    if !handler.ignoreMetrics.Has(container.DiskIOMetrics) {
    if handler.includedMetrics.Has(container.DiskIOMetrics) {
        common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
    }

    if handler.ignoreMetrics.Has(container.DiskUsageMetrics) {
    if !handler.includedMetrics.Has(container.DiskUsageMetrics) {
        return nil
    }
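GetSpec above derives capability flags from the inclusion set: network stats only make sense for pods, and both flags now read as positive membership tests. A simplified sketch of that gating with stand-in types (not cadvisor's own):

package main

import "fmt"

type MetricKind string

const (
    NetworkUsageMetrics MetricKind = "network"
    DiskUsageMetrics    MetricKind = "disk"
)

type MetricSet map[MetricKind]struct{}

func (ms MetricSet) Has(mk MetricKind) bool {
    _, found := ms[mk]
    return found
}

func specFlags(isPod bool, included MetricSet) (hasNetwork, hasFilesystem bool) {
    // Network stats are per-pod; disk stats apply to any container.
    hasNetwork = isPod && included.Has(NetworkUsageMetrics)
    hasFilesystem = included.Has(DiskUsageMetrics)
    return
}

func main() {
    included := MetricSet{DiskUsageMetrics: {}}
    net, fsys := specFlags(false, included)
    fmt.Println(net, fsys) // false true: not a pod, disk metrics included
}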
2
vendor/github.com/google/cadvisor/container/systemd/factory.go
generated
vendored
@@ -50,7 +50,7 @@ func (f *systemdFactory) DebugInfo() map[string][]string {
}

// Register registers the systemd container factory.
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
    glog.V(1).Infof("Registering systemd factory")
    factory := &systemdFactory{}
    container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw})
17
vendor/github.com/google/cadvisor/fs/fs.go
generated
vendored
@@ -422,7 +422,7 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
            }
        }
        if err != nil {
            glog.Errorf("Stat fs failed. Error: %v", err)
            glog.V(4).Infof("Stat fs failed. Error: %v", err)
        } else {
            deviceSet[device] = struct{}{}
            fs.DeviceInfo = DeviceInfo{
@@ -533,6 +533,21 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
    }

    mount, found := self.mounts[dir]
    // try the parent dir if not found until we reach the root dir
    // this is an issue on btrfs systems where the directory is not
    // the subvolume
    for !found {
        pathdir, _ := filepath.Split(dir)
        // break when we reach root
        if pathdir == "/" {
            break
        }
        // trim "/" from the new parent path otherwise the next possible
        // filepath.Split in the loop will not split the string any further
        dir = strings.TrimSuffix(pathdir, "/")
        mount, found = self.mounts[dir]
    }

    if found && mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
        major, minor, err := getBtrfsMajorMinorIds(mount)
        if err != nil {
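The new loop in GetDirFsDevice walks up the directory tree until a mount entry is found, which matters on btrfs where the queried directory may sit below the subvolume that is actually mounted. A runnable sketch of the same walk; the mounts map stands in for RealFsInfo's mount table:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

func findMountSource(dir string, mounts map[string]string) (string, bool) {
    source, found := mounts[dir]
    for !found {
        parent, _ := filepath.Split(dir)
        // Stop at the root; the empty check also guards relative inputs,
        // which the vendored code never sees.
        if parent == "/" || parent == "" {
            break
        }
        // Trim the trailing "/" so the next Split keeps making progress.
        dir = strings.TrimSuffix(parent, "/")
        source, found = mounts[dir]
    }
    return source, found
}

func main() {
    mounts := map[string]string{"/var/lib": "/dev/sda1"}
    src, ok := findMountSource("/var/lib/docker/btrfs/subvolumes/abc", mounts)
    fmt.Println(src, ok) // /dev/sda1 true
}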
3
vendor/github.com/google/cadvisor/info/v1/container.go
generated
vendored
@@ -358,6 +358,9 @@ type MemoryStats struct {
    // Units: Bytes.
    Swap uint64 `json:"swap"`

    // The amount of memory used for mapped files (includes tmpfs/shmem)
    MappedFile uint64 `json:"mapped_file"`

    // The amount of working set memory, this includes recently accessed memory,
    // dirty memory, and kernel memory. Working set is <= "usage".
    // Units: Bytes.
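MemoryStats gains a MappedFile field. On cgroup v1 this corresponds to the memory controller's "mapped_file" counter in memory.stat; the sketch below is illustrative (not cadvisor's parser) and pulls that one counter out of a stat blob:

package main

import (
    "bufio"
    "fmt"
    "strconv"
    "strings"
)

func mappedFileBytes(memoryStat string) (uint64, bool) {
    scanner := bufio.NewScanner(strings.NewReader(memoryStat))
    for scanner.Scan() {
        fields := strings.Fields(scanner.Text())
        if len(fields) == 2 && fields[0] == "mapped_file" {
            v, err := strconv.ParseUint(fields[1], 10, 64)
            return v, err == nil
        }
    }
    return 0, false
}

func main() {
    stat := "cache 1048576\nrss 524288\nmapped_file 262144\n"
    fmt.Println(mappedFileBytes(stat)) // 262144 true
}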
1
vendor/github.com/google/cadvisor/manager/BUILD
generated
vendored
@@ -19,6 +19,7 @@ go_library(
        "//vendor/github.com/google/cadvisor/container/containerd:go_default_library",
        "//vendor/github.com/google/cadvisor/container/crio:go_default_library",
        "//vendor/github.com/google/cadvisor/container/docker:go_default_library",
        "//vendor/github.com/google/cadvisor/container/mesos:go_default_library",
        "//vendor/github.com/google/cadvisor/container/raw:go_default_library",
        "//vendor/github.com/google/cadvisor/container/rkt:go_default_library",
        "//vendor/github.com/google/cadvisor/container/systemd:go_default_library",
67
vendor/github.com/google/cadvisor/manager/manager.go
generated
vendored
@@ -33,6 +33,7 @@ import (
    "github.com/google/cadvisor/container/containerd"
    "github.com/google/cadvisor/container/crio"
    "github.com/google/cadvisor/container/docker"
    "github.com/google/cadvisor/container/mesos"
    "github.com/google/cadvisor/container/raw"
    "github.com/google/cadvisor/container/rkt"
    "github.com/google/cadvisor/container/systemd"
@@ -141,7 +142,7 @@ type Manager interface {
}

// New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, ignoreMetricsSet container.MetricSet, collectorHttpClient *http.Client) (Manager, error) {
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, includedMetricsSet container.MetricSet, collectorHttpClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string) (Manager, error) {
    if memoryCache == nil {
        return nil, fmt.Errorf("manager requires memory storage")
    }
@@ -203,20 +204,21 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
    eventsChannel := make(chan watcher.ContainerEvent, 16)

    newManager := &manager{
        containers:               make(map[namespacedContainerName]*containerData),
        quitChannels:             make([]chan error, 0, 2),
        memoryCache:              memoryCache,
        fsInfo:                   fsInfo,
        cadvisorContainer:        selfContainer,
        inHostNamespace:          inHostNamespace,
        startupTime:              time.Now(),
        maxHousekeepingInterval:  maxHousekeepingInterval,
        allowDynamicHousekeeping: allowDynamicHousekeeping,
        ignoreMetrics:            ignoreMetricsSet,
        containerWatchers:        []watcher.ContainerWatcher{},
        eventsChannel:            eventsChannel,
        collectorHttpClient:      collectorHttpClient,
        nvidiaManager:            &accelerators.NvidiaManager{},
        containers:               make(map[namespacedContainerName]*containerData),
        quitChannels:             make([]chan error, 0, 2),
        memoryCache:              memoryCache,
        fsInfo:                   fsInfo,
        cadvisorContainer:        selfContainer,
        inHostNamespace:          inHostNamespace,
        startupTime:              time.Now(),
        maxHousekeepingInterval:  maxHousekeepingInterval,
        allowDynamicHousekeeping: allowDynamicHousekeeping,
        includedMetrics:          includedMetricsSet,
        containerWatchers:        []watcher.ContainerWatcher{},
        eventsChannel:            eventsChannel,
        collectorHttpClient:      collectorHttpClient,
        nvidiaManager:            &accelerators.NvidiaManager{},
        rawContainerCgroupPathPrefixWhiteList: rawContainerCgroupPathPrefixWhiteList,
    }

    machineInfo, err := machine.Info(sysfs, fsInfo, inHostNamespace)
@@ -283,21 +285,23 @@ type manager struct {
    startupTime              time.Time
    maxHousekeepingInterval  time.Duration
    allowDynamicHousekeeping bool
    ignoreMetrics            container.MetricSet
    includedMetrics          container.MetricSet
    containerWatchers        []watcher.ContainerWatcher
    eventsChannel            chan watcher.ContainerEvent
    collectorHttpClient      *http.Client
    nvidiaManager            accelerators.AcceleratorManager
    // List of raw container cgroup path prefix whitelist.
    rawContainerCgroupPathPrefixWhiteList []string
}

// Start the container manager.
func (self *manager) Start() error {
    err := docker.Register(self, self.fsInfo, self.ignoreMetrics)
    err := docker.Register(self, self.fsInfo, self.includedMetrics)
    if err != nil {
        glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err)
    }

    err = rkt.Register(self, self.fsInfo, self.ignoreMetrics)
    err = rkt.Register(self, self.fsInfo, self.includedMetrics)
    if err != nil {
        glog.V(5).Infof("Registration of the rkt container factory failed: %v", err)
    } else {
@@ -308,22 +312,27 @@ func (self *manager) Start() error {
        self.containerWatchers = append(self.containerWatchers, watcher)
    }

    err = containerd.Register(self, self.fsInfo, self.ignoreMetrics)
    err = containerd.Register(self, self.fsInfo, self.includedMetrics)
    if err != nil {
        glog.V(5).Infof("Registration of the containerd container factory failed: %v", err)
    }

    err = crio.Register(self, self.fsInfo, self.ignoreMetrics)
    err = crio.Register(self, self.fsInfo, self.includedMetrics)
    if err != nil {
        glog.V(5).Infof("Registration of the crio container factory failed: %v", err)
    }

    err = systemd.Register(self, self.fsInfo, self.ignoreMetrics)
    err = mesos.Register(self, self.fsInfo, self.includedMetrics)
    if err != nil {
        glog.V(5).Infof("Registration of the mesos container factory failed: %v", err)
    }

    err = systemd.Register(self, self.fsInfo, self.includedMetrics)
    if err != nil {
        glog.V(5).Infof("Registration of the systemd container factory failed: %v", err)
    }

    err = raw.Register(self, self.fsInfo, self.ignoreMetrics)
    err = raw.Register(self, self.fsInfo, self.includedMetrics, self.rawContainerCgroupPathPrefixWhiteList)
    if err != nil {
        glog.Errorf("Registration of the raw container factory failed: %v", err)
    }
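Start() registers each runtime factory independently and keeps going on failure; only the raw factory's failure is logged at error level, since the optional runtimes (docker, rkt, containerd, crio, and now mesos) may simply be absent on a node. A compact sketch of that tolerant registration sweep, using stand-in functions rather than cadvisor's:

package main

import (
    "errors"
    "log"
)

func main() {
    registrations := []struct {
        name     string
        register func() error
    }{
        {"docker", func() error { return errors.New("docker daemon not running") }},
        {"mesos", func() error { return nil }},
        {"raw", func() error { return nil }},
    }
    for _, r := range registrations {
        if err := r.register(); err != nil {
            // cadvisor logs most of these at glog.V(5); only the raw
            // factory failure is treated as an error-level event.
            log.Printf("registration of the %s container factory failed: %v", r.name, err)
        }
    }
}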
@@ -619,6 +628,11 @@ func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[
    for name, cont := range containers {
        inf, err := self.containerDataToContainerInfo(cont, query)
        if err != nil {
            // Ignore the error because of race condition and return best-effort result.
            if err == memory.ErrDataNotFound {
                glog.Warningf("Error getting data for container %s because of race condition", name)
                continue
            }
            return nil, err
        }
        output[name] = *inf
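The hunk above makes AllDockerContainers best-effort: if a container disappears between listing and stat lookup, the memory.ErrDataNotFound case is logged and skipped instead of failing the whole response. A self-contained sketch of the pattern; errDataNotFound stands in for memory.ErrDataNotFound:

package main

import (
    "errors"
    "fmt"
)

var errDataNotFound = errors.New("data not found")

func collectAll(names []string, lookup func(string) (int, error)) (map[string]int, error) {
    out := make(map[string]int)
    for _, name := range names {
        v, err := lookup(name)
        if err == errDataNotFound {
            // The container raced away between listing and lookup: skip it.
            fmt.Printf("skipping %s: data not found\n", name)
            continue
        }
        if err != nil {
            return nil, err
        }
        out[name] = v
    }
    return out, nil
}

func main() {
    lookup := func(name string) (int, error) {
        if name == "gone" {
            return 0, errDataNotFound
        }
        return len(name), nil
    }
    result, _ := collectAll([]string{"web", "gone", "db"}, lookup)
    fmt.Println(result)
}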
@@ -1072,22 +1086,25 @@ func (m *manager) destroyContainerLocked(containerName string) error {

// Detect all containers that have been added or deleted from the specified container.
func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) {
    m.containersLock.RLock()
    defer m.containersLock.RUnlock()

    // Get all subcontainers recursively.
    m.containersLock.RLock()
    cont, ok := m.containers[namespacedContainerName{
        Name: containerName,
    }]
    m.containersLock.RUnlock()
    if !ok {
        return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName)
    }
    allContainers, err := cont.handler.ListContainers(container.ListRecursive)

    if err != nil {
        return nil, nil, err
    }
    allContainers = append(allContainers, info.ContainerReference{Name: containerName})

    m.containersLock.RLock()
    defer m.containersLock.RUnlock()

    // Determine which were added and which were removed.
    allContainersSet := make(map[string]*containerData)
    for name, d := range m.containers {
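getContainersDiff previously held the read lock for the whole function, including the recursive ListContainers call; the rewrite narrows it to two short critical sections around the map accesses so the slow listing runs unlocked. A runnable sketch of that lock-narrowing shape with stand-in types:

package main

import (
    "fmt"
    "sync"
)

type registry struct {
    mu         sync.RWMutex
    containers map[string]struct{}
}

func (r *registry) added(name string, list func() []string) ([]string, error) {
    // First critical section: just the existence check.
    r.mu.RLock()
    _, ok := r.containers[name]
    r.mu.RUnlock()
    if !ok {
        return nil, fmt.Errorf("failed to find container %q", name)
    }

    // The slow recursive listing runs with no lock held.
    all := list()

    // Second critical section: compare against current state.
    r.mu.RLock()
    defer r.mu.RUnlock()
    var added []string
    for _, n := range all {
        if _, ok := r.containers[n]; !ok {
            added = append(added, n)
        }
    }
    return added, nil
}

func main() {
    r := &registry{containers: map[string]struct{}{"/": {}}}
    added, _ := r.added("/", func() []string { return []string{"/", "/new"} })
    fmt.Println(added) // [/new]
}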
1
vendor/github.com/google/cadvisor/metrics/BUILD
generated
vendored
@@ -8,6 +8,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/google/cadvisor/container:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
    ],
154
vendor/github.com/google/cadvisor/metrics/prometheus.go
generated
vendored
@@ -19,6 +19,7 @@ import (
    "regexp"
    "time"

    "github.com/google/cadvisor/container"
    info "github.com/google/cadvisor/info/v1"

    "github.com/golang/glog"
@@ -114,7 +115,7 @@ type PrometheusCollector struct {
// ContainerLabelsFunc specifies which base labels will be attached to all
// exported metrics. If left to nil, the DefaultContainerLabels function
// will be used instead.
func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCollector {
func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetrics container.MetricSet) *PrometheusCollector {
    if f == nil {
        f = DefaultContainerLabels
    }
@@ -134,7 +135,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                getValues: func(s *info.ContainerStats) metricValues {
                    return metricValues{{value: float64(time.Now().Unix())}}
                },
            }, {
            },
        },
    }
    if includedMetrics.Has(container.CpuUsageMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_cpu_user_seconds_total",
                help:      "Cumulative user cpu time consumed in seconds.",
                valueType: prometheus.CounterValue,
@@ -197,7 +203,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                getValues: func(s *info.ContainerStats) metricValues {
                    return metricValues{{value: float64(s.Cpu.CFS.ThrottledTime) / float64(time.Second)}}
                },
            }, {
            },
        }...)
    }
    if includedMetrics.Has(container.ProcessSchedulerMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_cpu_schedstat_run_seconds_total",
                help:      "Time duration the processes of the container have run on the CPU.",
                valueType: prometheus.CounterValue,
@@ -218,7 +229,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                getValues: func(s *info.ContainerStats) metricValues {
                    return metricValues{{value: float64(s.Cpu.Schedstat.RunPeriods)}}
                },
            }, {
            },
        }...)
    }
    if includedMetrics.Has(container.CpuLoadMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_cpu_load_average_10s",
                help:      "Value of container cpu load average over the last 10 seconds.",
                valueType: prometheus.GaugeValue,
@@ -226,6 +242,40 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                    return metricValues{{value: float64(s.Cpu.LoadAverage)}}
                },
            }, {
                name:        "container_tasks_state",
                help:        "Number of tasks in given state",
                extraLabels: []string{"state"},
                valueType:   prometheus.GaugeValue,
                getValues: func(s *info.ContainerStats) metricValues {
                    return metricValues{
                        {
                            value:  float64(s.TaskStats.NrSleeping),
                            labels: []string{"sleeping"},
                        },
                        {
                            value:  float64(s.TaskStats.NrRunning),
                            labels: []string{"running"},
                        },
                        {
                            value:  float64(s.TaskStats.NrStopped),
                            labels: []string{"stopped"},
                        },
                        {
                            value:  float64(s.TaskStats.NrUninterruptible),
                            labels: []string{"uninterruptible"},
                        },
                        {
                            value:  float64(s.TaskStats.NrIoWait),
                            labels: []string{"iowaiting"},
                        },
                    }
                },
            },
        }...)
    }
    if includedMetrics.Has(container.MemoryUsageMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_memory_cache",
                help:      "Number of bytes of page cache memory.",
                valueType: prometheus.GaugeValue,
@@ -239,6 +289,13 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                getValues: func(s *info.ContainerStats) metricValues {
                    return metricValues{{value: float64(s.Memory.RSS)}}
                },
            }, {
                name:      "container_memory_mapped_file",
                help:      "Size of memory mapped files in bytes.",
                valueType: prometheus.GaugeValue,
                getValues: func(s *info.ContainerStats) metricValues {
                    return metricValues{{value: float64(s.Memory.MappedFile)}}
                },
            }, {
                name: "container_memory_swap",
                help: "Container swap usage in bytes.",
@@ -300,7 +357,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                    },
                }
            },
            }, {
            },
        }...)
    }
    if includedMetrics.Has(container.AcceleratorUsageMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_accelerator_memory_total_bytes",
                help:      "Total accelerator memory.",
                valueType: prometheus.GaugeValue,
@@ -345,7 +407,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                    }
                    return values
                },
            }, {
            },
        }...)
    }
    if includedMetrics.Has(container.DiskUsageMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_fs_inodes_free",
                help:      "Number of available Inodes",
                valueType: prometheus.GaugeValue,
@@ -385,7 +452,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                        return float64(fs.Usage)
                    })
                },
            }, {
            },
        }...)
    }
    if includedMetrics.Has(container.DiskIOMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_fs_reads_bytes_total",
                help:      "Cumulative count of bytes read",
                valueType: prometheus.CounterValue,
@@ -547,7 +619,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                        return float64(fs.WeightedIoTime) / float64(time.Second)
                    })
                },
            }, {
            },
        }...)
    }
    if includedMetrics.Has(container.NetworkUsageMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_network_receive_bytes_total",
                help:      "Cumulative count of bytes received",
                valueType: prometheus.CounterValue,
@@ -667,7 +744,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                    }
                    return values
                },
            }, {
            },
        }...)
    }
    if includedMetrics.Has(container.NetworkTcpUsageMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_network_tcp_usage_total",
                help:      "tcp connection usage statistic for container",
                valueType: prometheus.GaugeValue,
@@ -720,7 +802,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                    },
                }
            },
            }, {
            },
        }...)
    }
    if includedMetrics.Has(container.NetworkUdpUsageMetrics) {
        c.containerMetrics = append(c.containerMetrics, []containerMetric{
            {
                name:      "container_network_udp_usage_total",
                help:      "udp connection usage statistic for container",
                valueType: prometheus.GaugeValue,
@@ -745,37 +832,8 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
                    },
                }
            },
            }, {
                name:        "container_tasks_state",
                help:        "Number of tasks in given state",
                extraLabels: []string{"state"},
                valueType:   prometheus.GaugeValue,
                getValues: func(s *info.ContainerStats) metricValues {
                    return metricValues{
                        {
                            value:  float64(s.TaskStats.NrSleeping),
                            labels: []string{"sleeping"},
                        },
                        {
                            value:  float64(s.TaskStats.NrRunning),
                            labels: []string{"running"},
                        },
                        {
                            value:  float64(s.TaskStats.NrStopped),
                            labels: []string{"stopped"},
                        },
                        {
                            value:  float64(s.TaskStats.NrUninterruptible),
                            labels: []string{"uninterruptible"},
                        },
                        {
                            value:  float64(s.TaskStats.NrIoWait),
                            labels: []string{"iowaiting"},
                        },
                    }
                },
            },
            },
        }...)
    }

    return c
@@ -842,6 +900,19 @@ func DefaultContainerLabels(container *info.ContainerInfo) map[string]string {
    return set
}

// BaseContainerLabels implements ContainerLabelsFunc. It only exports the
// container name, first alias, and image name.
func BaseContainerLabels(container *info.ContainerInfo) map[string]string {
    set := map[string]string{LabelID: container.Name}
    if len(container.Aliases) > 0 {
        set[LabelName] = container.Aliases[0]
    }
    if image := container.Spec.Image; len(image) > 0 {
        set[LabelImage] = image
    }
    return set
}

func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) {
    containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
    if err != nil {
@@ -889,6 +960,9 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
    }

    // Now for the actual metrics
    if len(container.Stats) == 0 {
        continue
    }
    stats := container.Stats[0]
    for _, cm := range c.containerMetrics {
        if cm.condition != nil && !cm.condition(container.Spec) {
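Two API changes land in this file: NewPrometheusCollector now takes the included-metrics set and appends each metric family only when its kind is included, and the new BaseContainerLabels offers a minimal ContainerLabelsFunc exporting just id, name, and image. A sketch of the latter's selection logic, using a trimmed stand-in for info.ContainerInfo rather than cadvisor's type:

package main

import "fmt"

type containerInfo struct {
    Name    string
    Aliases []string
    Image   string
}

func baseContainerLabels(c *containerInfo) map[string]string {
    set := map[string]string{"id": c.Name}
    if len(c.Aliases) > 0 {
        set["name"] = c.Aliases[0]
    }
    if c.Image != "" {
        set["image"] = c.Image
    }
    return set
}

func main() {
    c := &containerInfo{Name: "/docker/abc123", Aliases: []string{"web"}, Image: "nginx:1.15"}
    fmt.Println(baseContainerLabels(c))
}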