update cadvisor godeps to v0.30.0
vendor/github.com/google/cadvisor/cache/memory/memory.go (10 changes; generated, vendored)
@@ -70,16 +70,16 @@ type InMemoryCache struct {
 	backend storage.StorageDriver
 }
 
-func (self *InMemoryCache) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
+func (self *InMemoryCache) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
 	var cstore *containerCache
 	var ok bool
 
 	func() {
 		self.lock.Lock()
 		defer self.lock.Unlock()
-		if cstore, ok = self.containerCacheMap[ref.Name]; !ok {
-			cstore = newContainerStore(ref, self.maxAge)
-			self.containerCacheMap[ref.Name] = cstore
+		if cstore, ok = self.containerCacheMap[cInfo.ContainerReference.Name]; !ok {
+			cstore = newContainerStore(cInfo.ContainerReference, self.maxAge)
+			self.containerCacheMap[cInfo.ContainerReference.Name] = cstore
 		}
 	}()
 
@@ -87,7 +87,7 @@ func (self *InMemoryCache) AddStats(ref info.ContainerReference, stats *info.Con
 	// TODO(monnand): To deal with long delay write operations, we
 	// may want to start a pool of goroutines to do write
 	// operations.
-	if err := self.backend.AddStats(ref, stats); err != nil {
+	if err := self.backend.AddStats(cInfo, stats); err != nil {
 		glog.Error(err)
 	}
 }
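Note: AddStats now takes the full *info.ContainerInfo rather than a bare info.ContainerReference, so the cache and its storage backend can see container metadata, not just identity. A minimal caller-side migration sketch, assuming an existing InMemoryCache value `cache` and a stats value `stats` (both hypothetical); the lookup `cInfo.ContainerReference.Name` above shows that ContainerReference is a field of info.ContainerInfo:

	// Hypothetical caller migration; not part of this diff.
	cInfo := &info.ContainerInfo{
		ContainerReference: info.ContainerReference{Name: "/docker/abc123"},
	}
	if err := cache.AddStats(cInfo, stats); err != nil {
		// handle the storage-backend error
	}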
vendor/github.com/google/cadvisor/container/common/BUILD (4 changes; generated, vendored)
@@ -5,18 +5,18 @@ go_library(
     srcs = [
         "container_hints.go",
         "fsHandler.go",
+        "fsnotify_watcher.go",
         "helpers.go",
-        "inotify_watcher.go",
     ],
     importpath = "github.com/google/cadvisor/container/common",
     visibility = ["//visibility:public"],
     deps = [
+        "//vendor/github.com/fsnotify/fsnotify:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/fs:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/utils:go_default_library",
-        "//vendor/golang.org/x/exp/inotify:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/container/common/fsnotify_watcher.go (renamed from inotify_watcher.go; generated, vendored)
@@ -17,15 +17,15 @@ package common
 import (
 	"sync"
 
-	"golang.org/x/exp/inotify"
+	"github.com/fsnotify/fsnotify"
 )
 
-// Watcher for container-related inotify events in the cgroup hierarchy.
+// Watcher for container-related fsnotify events in the cgroup hierarchy.
 //
 // Implementation is thread-safe.
-type InotifyWatcher struct {
-	// Underlying inotify watcher.
-	watcher *inotify.Watcher
+type FsnotifyWatcher struct {
+	// Underlying fsnotify watcher.
+	watcher *fsnotify.Watcher
 
 	// Map of containers being watched to cgroup paths watched for that container.
 	containersWatched map[string]map[string]bool
@@ -34,28 +34,28 @@ type InotifyWatcher struct {
 	lock sync.Mutex
 }
 
-func NewInotifyWatcher() (*InotifyWatcher, error) {
-	w, err := inotify.NewWatcher()
+func NewFsnotifyWatcher() (*FsnotifyWatcher, error) {
+	w, err := fsnotify.NewWatcher()
 	if err != nil {
 		return nil, err
 	}
 
-	return &InotifyWatcher{
+	return &FsnotifyWatcher{
 		watcher:           w,
 		containersWatched: make(map[string]map[string]bool),
 	}, nil
 }
 
 // Add a watch to the specified directory. Returns if the container was already being watched.
-func (iw *InotifyWatcher) AddWatch(containerName, dir string) (bool, error) {
+func (iw *FsnotifyWatcher) AddWatch(containerName, dir string) (bool, error) {
 	iw.lock.Lock()
 	defer iw.lock.Unlock()
 
 	cgroupsWatched, alreadyWatched := iw.containersWatched[containerName]
 
-	// Register an inotify notification.
+	// Register an fsnotify notification.
 	if !cgroupsWatched[dir] {
-		err := iw.watcher.AddWatch(dir, inotify.IN_CREATE|inotify.IN_DELETE|inotify.IN_MOVE)
+		err := iw.watcher.Add(dir)
 		if err != nil {
 			return alreadyWatched, err
 		}
@@ -74,7 +74,7 @@ func (iw *InotifyWatcher) AddWatch(containerName, dir string) (bool, error) {
 }
 
 // Remove watch from the specified directory. Returns if this was the last watch on the specified container.
-func (iw *InotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) {
+func (iw *FsnotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) {
 	iw.lock.Lock()
 	defer iw.lock.Unlock()
 
@@ -84,9 +84,9 @@ func (iw *InotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) {
 		return false, nil
 	}
 
-	// Remove the inotify watch if it exists.
+	// Remove the fsnotify watch if it exists.
 	if cgroupsWatched[dir] {
-		err := iw.watcher.RemoveWatch(dir)
+		err := iw.watcher.Remove(dir)
 		if err != nil {
 			return false, nil
 		}
@@ -103,22 +103,22 @@ func (iw *InotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) {
 }
 
 // Errors are returned on this channel.
-func (iw *InotifyWatcher) Error() chan error {
-	return iw.watcher.Error
+func (iw *FsnotifyWatcher) Error() chan error {
+	return iw.watcher.Errors
 }
 
 // Events are returned on this channel.
-func (iw *InotifyWatcher) Event() chan *inotify.Event {
-	return iw.watcher.Event
+func (iw *FsnotifyWatcher) Event() chan fsnotify.Event {
+	return iw.watcher.Events
 }
 
-// Closes the inotify watcher.
-func (iw *InotifyWatcher) Close() error {
+// Closes the fsnotify watcher.
+func (iw *FsnotifyWatcher) Close() error {
 	return iw.watcher.Close()
 }
 
 // Returns a map of containers to the cgroup paths being watched.
-func (iw *InotifyWatcher) GetWatches() map[string][]string {
+func (iw *FsnotifyWatcher) GetWatches() map[string][]string {
 	out := make(map[string][]string, len(iw.containersWatched))
 	for k, v := range iw.containersWatched {
 		out[k] = mapToSlice(v)
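Note: the old golang.org/x/exp/inotify API took an event mask at watch time (IN_CREATE|IN_DELETE|IN_MOVE) and delivered *inotify.Event on singular Event/Error channels; fsnotify's Watcher.Add takes only a path and delivers value-typed fsnotify.Event on plural Events/Errors channels, so event filtering moves to the receiving side. A self-contained sketch of the fsnotify side of that contract (the cgroup path is illustrative):

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// fsnotify has no per-watch event mask; filter on receipt instead.
	if err := w.Add("/sys/fs/cgroup/cpu"); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case ev := <-w.Events: // value type, not *Event as with x/exp/inotify
			if ev.Op&(fsnotify.Create|fsnotify.Remove|fsnotify.Rename) != 0 {
				log.Println("cgroup change:", ev)
			}
		case err := <-w.Errors:
			log.Println("watch error:", err)
		}
	}
}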
vendor/github.com/google/cadvisor/container/common/helpers.go (2 changes; generated, vendored)
@@ -40,7 +40,7 @@ func DebugInfo(watches map[string][]string) map[string][]string {
 			lines = append(lines, fmt.Sprintf("\t%s", cg))
 		}
 	}
-	out["Inotify watches"] = lines
+	out["Fsnotify watches"] = lines
 
 	return out
 }
vendor/github.com/google/cadvisor/container/containerd/BUILD (3 changes; generated, vendored)
@@ -18,15 +18,14 @@ go_library(
         "//vendor/github.com/containerd/containerd/dialer:go_default_library",
         "//vendor/github.com/containerd/containerd/errdefs:go_default_library",
         "//vendor/github.com/containerd/containerd/namespaces:go_default_library",
+        "//vendor/github.com/gogo/protobuf/types:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/github.com/golang/protobuf/ptypes/empty:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/container/common:go_default_library",
         "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
         "//vendor/github.com/google/cadvisor/fs:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
-        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
         "//vendor/github.com/opencontainers/runtime-spec/specs-go:go_default_library",
vendor/github.com/google/cadvisor/container/containerd/client.go (25 changes; generated, vendored)
@@ -16,6 +16,8 @@ package containerd
 
 import (
 	"context"
+	"fmt"
+	"net"
 	"sync"
 	"time"
 
@@ -25,7 +27,7 @@ import (
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/dialer"
 	"github.com/containerd/containerd/errdefs"
-	pempty "github.com/golang/protobuf/ptypes/empty"
+	ptypes "github.com/gogo/protobuf/types"
 	"google.golang.org/grpc"
 )
 
@@ -49,16 +51,29 @@ type containerdClient interface {
 var once sync.Once
 var ctrdClient containerdClient = nil
 
+const (
+	address           = "/run/containerd/containerd.sock"
+	maxBackoffDelay   = 3 * time.Second
+	connectionTimeout = 2 * time.Second
+)
+
 // Client creates a containerd client
 func Client() (containerdClient, error) {
 	var retErr error
 	once.Do(func() {
+		tryConn, err := net.DialTimeout("unix", address, connectionTimeout)
+		if err != nil {
+			retErr = fmt.Errorf("containerd: cannot unix dial containerd api service: %v", err)
+			return
+		}
+		tryConn.Close()
+
 		gopts := []grpc.DialOption{
 			grpc.WithInsecure(),
 			grpc.WithDialer(dialer.Dialer),
 			grpc.WithBlock(),
-			grpc.WithTimeout(2 * time.Second),
-			grpc.WithBackoffMaxDelay(3 * time.Second),
+			grpc.WithBackoffMaxDelay(maxBackoffDelay),
+			grpc.WithTimeout(connectionTimeout),
 		}
 		unary, stream := newNSInterceptors(k8sNamespace)
 		gopts = append(gopts,
@@ -66,7 +81,7 @@ func Client() (containerdClient, error) {
 			grpc.WithStreamInterceptor(stream),
 		)
 
-		conn, err := grpc.Dial(dialer.DialAddress("/var/run/containerd/containerd.sock"), gopts...)
+		conn, err := grpc.Dial(dialer.DialAddress(address), gopts...)
 		if err != nil {
 			retErr = err
 			return
@@ -101,7 +116,7 @@ func (c *client) TaskPid(ctx context.Context, id string) (uint32, error) {
 }
 
 func (c *client) Version(ctx context.Context) (string, error) {
-	response, err := c.versionService.Version(ctx, &pempty.Empty{})
+	response, err := c.versionService.Version(ctx, &ptypes.Empty{})
 	if err != nil {
 		return "", errdefs.FromGRPC(err)
 	}
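Note: a blocking grpc.Dial with WithTimeout spends the full timeout even when the socket simply does not exist; the new code probes the unix socket with net.DialTimeout first so a missing containerd fails fast. A standalone sketch of the same pattern (socket path and timeout taken from the constants in the diff):

package main

import (
	"fmt"
	"net"
	"time"
)

// probeSocket fails fast when the containerd socket is absent, instead of
// letting a blocking gRPC dial burn its whole connection timeout.
func probeSocket(address string, timeout time.Duration) error {
	conn, err := net.DialTimeout("unix", address, timeout)
	if err != nil {
		return fmt.Errorf("containerd: cannot unix dial containerd api service: %v", err)
	}
	return conn.Close()
}

func main() {
	if err := probeSocket("/run/containerd/containerd.sock", 2*time.Second); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("socket reachable; safe to grpc.Dial")
}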
vendor/github.com/google/cadvisor/container/containerd/handler.go (69 changes; generated, vendored)
@@ -23,7 +23,6 @@ import (
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
-	"github.com/opencontainers/runc/libcontainer/cgroups"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
 	"golang.org/x/net/context"
@@ -37,31 +36,21 @@ import (
 )
 
 type containerdContainerHandler struct {
-	client             containerdClient
-	name               string
-	id                 string
-	aliases            []string
 	machineInfoFactory info.MachineInfoFactory
 	// Absolute path to the cgroup hierarchies of this container.
 	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
 	cgroupPaths map[string]string
-	// Manager of this container's cgroups.
-	cgroupManager cgroups.Manager
-	fsInfo        fs.FsInfo
-	poolName      string
+	fsInfo fs.FsInfo
 	// Time at which this container was created.
 	creationTime time.Time
 	// Metadata associated with the container.
-	labels map[string]string
-	envs   map[string]string
-	// The container PID used to switch namespaces as required
-	pid int
+	reference info.ContainerReference
+	envs      map[string]string
+	labels    map[string]string
 	// Image name used for this container.
 	image string
-	// The host root FS to read
-	rootFs string
 	// Filesystem handler.
 	ignoreMetrics container.MetricSet
+
+	libcontainerHandler *containerlibcontainer.Handler
 }
 
 var _ container.ContainerHandler = &containerdContainerHandler{}
@@ -131,25 +120,27 @@ func newContainerdContainerHandler(
 		rootfs = "/rootfs"
 	}
 
+	containerReference := info.ContainerReference{
+		Id:        id,
+		Name:      name,
+		Namespace: k8sContainerdNamespace,
+		Aliases:   []string{id, name},
+	}
+
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), ignoreMetrics)
+
 	handler := &containerdContainerHandler{
-		id:                 id,
-		client:             client,
-		name:               name,
-		machineInfoFactory: machineInfoFactory,
-		cgroupPaths:        cgroupPaths,
-		cgroupManager:      cgroupManager,
-		rootFs:             rootfs,
-		fsInfo:             fsInfo,
-		envs:               make(map[string]string),
-		labels:             make(map[string]string),
-		ignoreMetrics:      ignoreMetrics,
-		pid:                int(taskPid),
 		creationTime:       cntr.CreatedAt,
+		machineInfoFactory:  machineInfoFactory,
+		cgroupPaths:         cgroupPaths,
+		fsInfo:              fsInfo,
+		envs:                make(map[string]string),
+		labels:              cntr.Labels,
+		ignoreMetrics:       ignoreMetrics,
+		reference:           containerReference,
+		libcontainerHandler: libcontainerHandler,
 	}
-	// Add the name and bare ID as aliases of the container.
-	handler.labels = cntr.Labels
 	handler.image = cntr.Image
-	handler.aliases = []string{id, name}
 	for _, envVar := range spec.Process.Env {
 		if envVar != "" {
 			splits := strings.SplitN(envVar, "=", 2)
@@ -163,13 +154,7 @@ func newContainerdContainerHandler(
 }
 
 func (self *containerdContainerHandler) ContainerReference() (info.ContainerReference, error) {
-	return info.ContainerReference{
-		Id:        self.id,
-		Name:      self.name,
-		Namespace: k8sContainerdNamespace,
-		Labels:    self.labels,
-		Aliases:   self.aliases,
-	}, nil
+	return self.reference, nil
 }
 
 func (self *containerdContainerHandler) needNet() bool {
@@ -208,7 +193,7 @@ func (self *containerdContainerHandler) getFsStats(stats *info.ContainerStats) e
 }
 
 func (self *containerdContainerHandler) GetStats() (*info.ContainerStats, error) {
-	stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
+	stats, err := self.libcontainerHandler.GetStats()
 	if err != nil {
 		return stats, err
 	}
@@ -232,7 +217,7 @@ func (self *containerdContainerHandler) ListContainers(listType container.ListTy
 func (self *containerdContainerHandler) GetCgroupPath(resource string) (string, error) {
 	path, ok := self.cgroupPaths[resource]
 	if !ok {
-		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
+		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
 	}
 	return path, nil
 }
@@ -242,7 +227,7 @@ func (self *containerdContainerHandler) GetContainerLabels() map[string]string {
 }
 
 func (self *containerdContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
-	return containerlibcontainer.GetProcesses(self.cgroupManager)
+	return self.libcontainerHandler.GetProcesses()
 }
 
 func (self *containerdContainerHandler) Exists() bool {
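Note: all three runtime handlers in this commit (containerd, crio, docker) now build their info.ContainerReference once in the constructor and return the stored value from ContainerReference(), instead of reassembling the struct and aliases slice on every call. A self-contained sketch of that pattern using simplified local types (not cadvisor's actual ones):

package main

import "fmt"

type containerReference struct {
	Id, Name, Namespace string
	Aliases             []string
}

type handler struct {
	// Computed once at construction; ContainerReference() just returns it.
	reference containerReference
}

func newHandler(id, name, namespace string) *handler {
	return &handler{reference: containerReference{
		Id:        id,
		Name:      name,
		Namespace: namespace,
		Aliases:   []string{id, name},
	}}
}

func (h *handler) ContainerReference() (containerReference, error) {
	return h.reference, nil
}

func main() {
	h := newHandler("abc123", "/kubepods/abc123", "containerd")
	ref, _ := h.ContainerReference()
	fmt.Printf("%+v\n", ref)
}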
vendor/github.com/google/cadvisor/container/crio/BUILD (1 change; generated, vendored)
@@ -17,7 +17,6 @@ go_library(
         "//vendor/github.com/google/cadvisor/fs:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
-        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
     ],
vendor/github.com/google/cadvisor/container/crio/handler.go (75 changes; generated, vendored)
@@ -21,7 +21,6 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
 
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/common"
@@ -29,48 +28,32 @@ import (
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
 
-	"github.com/opencontainers/runc/libcontainer/cgroups"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
 )
 
 type crioContainerHandler struct {
-	name               string
-	id                 string
-	aliases            []string
 	machineInfoFactory info.MachineInfoFactory
 
 	// Absolute path to the cgroup hierarchies of this container.
 	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
 	cgroupPaths map[string]string
 
-	// Manager of this container's cgroups.
-	cgroupManager cgroups.Manager
-
 	// the CRI-O storage driver
 	storageDriver    storageDriver
 	fsInfo           fs.FsInfo
 	rootfsStorageDir string
 
 	// Time at which this container was created.
 	creationTime time.Time
 
 	// Metadata associated with the container.
-	labels map[string]string
 	envs   map[string]string
+	labels map[string]string
 
 	// TODO
 	// crio version handling...
 
-	// The container PID used to switch namespaces as required
-	pid int
-
 	// Image name used for this container.
 	image string
 
-	// The host root FS to read
-	rootFs string
-
 	// The network mode of the container
 	// TODO
 
@@ -84,6 +67,10 @@ type crioContainerHandler struct {
 
 	// container restart count
 	restartCount int
+
+	reference info.ContainerReference
+
+	libcontainerHandler *containerlibcontainer.Handler
 }
 
 var _ container.ContainerHandler = &crioContainerHandler{}
@@ -150,25 +137,29 @@ func newCrioContainerHandler(
 		rootfsStorageDir = filepath.Join(rootfsStorageDir, "diff")
 	}
 
-	// TODO: extract object mother method
-	handler := &crioContainerHandler{
-		id:                 id,
-		name:               name,
-		machineInfoFactory: machineInfoFactory,
-		cgroupPaths:        cgroupPaths,
-		cgroupManager:      cgroupManager,
-		storageDriver:      storageDriver,
-		fsInfo:             fsInfo,
-		rootFs:             rootFs,
-		rootfsStorageDir:   rootfsStorageDir,
-		envs:               make(map[string]string),
-		ignoreMetrics:      ignoreMetrics,
-	}
+	containerReference := info.ContainerReference{
+		Id:        id,
+		Name:      name,
+		Aliases:   []string{cInfo.Name, id},
+		Namespace: CrioNamespace,
+	}
+
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, ignoreMetrics)
+
+	// TODO: extract object mother method
+	handler := &crioContainerHandler{
+		machineInfoFactory:  machineInfoFactory,
+		cgroupPaths:         cgroupPaths,
+		storageDriver:       storageDriver,
+		fsInfo:              fsInfo,
+		rootfsStorageDir:    rootfsStorageDir,
+		envs:                make(map[string]string),
+		labels:              cInfo.Labels,
+		ignoreMetrics:       ignoreMetrics,
+		reference:           containerReference,
+		libcontainerHandler: libcontainerHandler,
+	}
 
 	handler.creationTime = time.Unix(0, cInfo.CreatedTime)
-	handler.pid = cInfo.Pid
-	handler.aliases = append(handler.aliases, cInfo.Name, id)
-	handler.labels = cInfo.Labels
 	handler.image = cInfo.Image
 	// TODO: we wantd to know graph driver DeviceId (dont think this is needed now)
 
@@ -204,13 +195,7 @@ func (self *crioContainerHandler) Cleanup() {
 }
 
 func (self *crioContainerHandler) ContainerReference() (info.ContainerReference, error) {
-	return info.ContainerReference{
-		Id:        self.id,
-		Name:      self.name,
-		Aliases:   self.aliases,
-		Namespace: CrioNamespace,
-		Labels:    self.labels,
-	}, nil
+	return self.reference, nil
 }
 
 func (self *crioContainerHandler) needNet() bool {
@@ -286,7 +271,7 @@ func (self *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
 }
 
 func (self *crioContainerHandler) GetStats() (*info.ContainerStats, error) {
-	stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
+	stats, err := self.libcontainerHandler.GetStats()
 	if err != nil {
 		return stats, err
 	}
@@ -315,7 +300,7 @@ func (self *crioContainerHandler) ListContainers(listType container.ListType) ([
 func (self *crioContainerHandler) GetCgroupPath(resource string) (string, error) {
 	path, ok := self.cgroupPaths[resource]
 	if !ok {
-		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
+		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
 	}
 	return path, nil
 }
@@ -329,7 +314,7 @@ func (self *crioContainerHandler) GetContainerIPAddress() string {
 }
 
 func (self *crioContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
-	return containerlibcontainer.GetProcesses(self.cgroupManager)
+	return self.libcontainerHandler.GetProcesses()
 }
 
 func (self *crioContainerHandler) Exists() bool {
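Note: CRI-O reports the creation time as nanoseconds since the Unix epoch, which is why the handler converts it with time.Unix(0, cInfo.CreatedTime). A tiny standalone sketch of that conversion (the value is hypothetical):

package main

import (
	"fmt"
	"time"
)

func main() {
	// cInfo.CreatedTime in the handler is nanoseconds since the Unix epoch.
	var createdNs int64 = 1522857600123456789 // hypothetical value
	created := time.Unix(0, createdNs)
	fmt.Println(created.UTC().Format(time.RFC3339Nano))
}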
vendor/github.com/google/cadvisor/container/docker/BUILD (1 change; generated, vendored)
@@ -27,7 +27,6 @@ go_library(
         "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
         "//vendor/github.com/google/cadvisor/utils/docker:go_default_library",
         "//vendor/github.com/google/cadvisor/zfs:go_default_library",
-        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
vendor/github.com/google/cadvisor/container/docker/handler.go (106 changes; generated, vendored)
@@ -35,7 +35,6 @@ import (
 	dockercontainer "github.com/docker/docker/api/types/container"
 	docker "github.com/docker/docker/client"
 	"github.com/golang/glog"
-	"github.com/opencontainers/runc/libcontainer/cgroups"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
 	"golang.org/x/net/context"
@@ -52,53 +51,29 @@ const (
 )
 
 type dockerContainerHandler struct {
-	client  *docker.Client
-	name    string
-	id      string
-	aliases []string
-
 	// machineInfoFactory provides info.MachineInfo
 	machineInfoFactory info.MachineInfoFactory
 
 	// Absolute path to the cgroup hierarchies of this container.
 	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
 	cgroupPaths map[string]string
 
-	// Manager of this container's cgroups.
-	cgroupManager cgroups.Manager
-
 	// the docker storage driver
 	storageDriver    storageDriver
 	fsInfo           fs.FsInfo
 	rootfsStorageDir string
 
-	// devicemapper state
-
-	// the devicemapper poolname
-	poolName string
-	// the devicemapper device id for the container
-	deviceID string
-
-	// zfs Filesystem
-	zfsFilesystem string
-
-	// zfsParent is the parent for docker zfs
-	zfsParent string
-
 	// Time at which this container was created.
 	creationTime time.Time
 
 	// Metadata associated with the container.
-	labels map[string]string
 	envs   map[string]string
-
-	// The container PID used to switch namespaces as required
-	pid int
+	labels map[string]string
 
 	// Image name used for this container.
 	image string
 
-	// The host root FS to read
-	rootFs string
-
 	// The network mode of the container
 	networkMode dockercontainer.NetworkMode
@@ -110,14 +85,19 @@ type dockerContainerHandler struct {
 
 	ignoreMetrics container.MetricSet
 
-	// thin pool watcher
-	thinPoolWatcher *devicemapper.ThinPoolWatcher
+	// the devicemapper poolname
+	poolName string
 
-	// zfs watcher
-	zfsWatcher *zfs.ZfsWatcher
+	// zfsParent is the parent for docker zfs
+	zfsParent string
 
 	// container restart count
 	restartCount int
+
+	// Reference to the container
+	reference info.ContainerReference
+
+	libcontainerHandler *containerlibcontainer.Handler
 }
 
 var _ container.ContainerHandler = &dockerContainerHandler{}
@@ -210,46 +190,42 @@ func newDockerContainerHandler(
 		zfsFilesystem = path.Join(zfsParent, rwLayerID)
 	}
 
-	// TODO: extract object mother method
-	handler := &dockerContainerHandler{
-		id:                 id,
-		client:             client,
-		name:               name,
-		machineInfoFactory: machineInfoFactory,
-		cgroupPaths:        cgroupPaths,
-		cgroupManager:      cgroupManager,
-		storageDriver:      storageDriver,
-		fsInfo:             fsInfo,
-		rootFs:             rootFs,
-		poolName:           thinPoolName,
-		zfsFilesystem:      zfsFilesystem,
-		rootfsStorageDir:   rootfsStorageDir,
-		envs:               make(map[string]string),
-		ignoreMetrics:      ignoreMetrics,
-		thinPoolWatcher:    thinPoolWatcher,
-		zfsWatcher:         zfsWatcher,
-		zfsParent:          zfsParent,
-	}
-
 	// We assume that if Inspect fails then the container is not known to docker.
 	ctnr, err := client.ContainerInspect(context.Background(), id)
 	if err != nil {
 		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
 	}
 
+	// TODO: extract object mother method
+	handler := &dockerContainerHandler{
+		machineInfoFactory: machineInfoFactory,
+		cgroupPaths:        cgroupPaths,
+		fsInfo:             fsInfo,
+		storageDriver:      storageDriver,
+		poolName:           thinPoolName,
+		rootfsStorageDir:   rootfsStorageDir,
+		envs:               make(map[string]string),
+		labels:             ctnr.Config.Labels,
+		ignoreMetrics:      ignoreMetrics,
+		zfsParent:          zfsParent,
+	}
 	// Timestamp returned by Docker is in time.RFC3339Nano format.
 	handler.creationTime, err = time.Parse(time.RFC3339Nano, ctnr.Created)
 	if err != nil {
 		// This should not happen, report the error just in case
 		return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err)
 	}
-	handler.pid = ctnr.State.Pid
+	handler.libcontainerHandler = containerlibcontainer.NewHandler(cgroupManager, rootFs, ctnr.State.Pid, ignoreMetrics)
 
 	// Add the name and bare ID as aliases of the container.
-	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
-	handler.labels = ctnr.Config.Labels
+	handler.reference = info.ContainerReference{
+		Id:        id,
+		Name:      name,
+		Aliases:   []string{strings.TrimPrefix(ctnr.Name, "/"), id},
+		Namespace: DockerNamespace,
+	}
 	handler.image = ctnr.Config.Image
 	handler.networkMode = ctnr.HostConfig.NetworkMode
-	handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]
 	handler.restartCount = ctnr.RestartCount
 
 	// Obtain the IP address for the contianer.
@@ -273,7 +249,7 @@ func newDockerContainerHandler(
 			fsHandler: common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, otherStorageDir, fsInfo),
 			thinPoolWatcher: thinPoolWatcher,
 			zfsWatcher:      zfsWatcher,
-			deviceID:        handler.deviceID,
+			deviceID:        ctnr.GraphDriver.Data["DeviceId"],
 			zfsFilesystem:   zfsFilesystem,
 		}
 	}
@@ -365,13 +341,7 @@ func (self *dockerContainerHandler) Cleanup() {
 }
 
 func (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) {
-	return info.ContainerReference{
-		Id:        self.id,
-		Name:      self.name,
-		Aliases:   self.aliases,
-		Namespace: DockerNamespace,
-		Labels:    self.labels,
-	}, nil
+	return self.reference, nil
 }
 
 func (self *dockerContainerHandler) needNet() bool {
@@ -455,7 +425,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 
 // TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.
 func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
-	stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
+	stats, err := self.libcontainerHandler.GetStats()
 	if err != nil {
 		return stats, err
 	}
@@ -484,7 +454,7 @@ func (self *dockerContainerHandler) ListContainers(listType container.ListType)
 func (self *dockerContainerHandler) GetCgroupPath(resource string) (string, error) {
 	path, ok := self.cgroupPaths[resource]
 	if !ok {
-		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
+		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
 	}
 	return path, nil
 }
@@ -498,7 +468,7 @@ func (self *dockerContainerHandler) GetContainerIPAddress() string {
 }
 
 func (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
-	return containerlibcontainer.GetProcesses(self.cgroupManager)
+	return self.libcontainerHandler.GetProcesses()
 }
 
 func (self *dockerContainerHandler) Exists() bool {
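Note: the constructor reorders work so the docker inspect happens before the handler literal is built, letting labels and the pid feed directly into construction. The Created timestamp from inspect is an RFC 3339 string with nanoseconds, parsed exactly as above. A tiny standalone sketch (the timestamp value is hypothetical):

package main

import (
	"fmt"
	"time"
)

func main() {
	// ctnr.Created from docker inspect; value here is hypothetical.
	created := "2018-04-04T12:30:45.123456789Z"
	t, err := time.Parse(time.RFC3339Nano, created)
	if err != nil {
		fmt.Printf("failed to parse the create timestamp %q: %v\n", created, err)
		return
	}
	fmt.Println(t.Unix())
}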
vendor/github.com/google/cadvisor/container/factory.go (21 changes; generated, vendored)
@@ -41,16 +41,17 @@ type ContainerHandlerFactory interface {
 type MetricKind string
 
 const (
-	CpuUsageMetrics        MetricKind = "cpu"
-	PerCpuUsageMetrics     MetricKind = "percpu"
-	MemoryUsageMetrics     MetricKind = "memory"
-	CpuLoadMetrics         MetricKind = "cpuLoad"
-	DiskIOMetrics          MetricKind = "diskIO"
-	DiskUsageMetrics       MetricKind = "disk"
-	NetworkUsageMetrics    MetricKind = "network"
-	NetworkTcpUsageMetrics MetricKind = "tcp"
-	NetworkUdpUsageMetrics MetricKind = "udp"
-	AppMetrics             MetricKind = "app"
+	CpuUsageMetrics         MetricKind = "cpu"
+	ProcessSchedulerMetrics MetricKind = "sched"
+	PerCpuUsageMetrics      MetricKind = "percpu"
+	MemoryUsageMetrics      MetricKind = "memory"
+	CpuLoadMetrics          MetricKind = "cpuLoad"
+	DiskIOMetrics           MetricKind = "diskIO"
+	DiskUsageMetrics        MetricKind = "disk"
+	NetworkUsageMetrics     MetricKind = "network"
+	NetworkTcpUsageMetrics  MetricKind = "tcp"
+	NetworkUdpUsageMetrics  MetricKind = "udp"
+	AppMetrics              MetricKind = "app"
 )
 
 func (mk MetricKind) String() string {
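Note: the new "sched" metric kind lets per-process scheduler stats be disabled the same way as the other kinds; the new libcontainer handler below gates collection with ignoreMetrics.Has(container.ProcessSchedulerMetrics). A self-contained sketch of that set-membership gating, using a simplified stand-in for cadvisor's container.MetricSet:

package main

import "fmt"

type MetricKind string

const (
	CpuUsageMetrics         MetricKind = "cpu"
	ProcessSchedulerMetrics MetricKind = "sched"
)

// Simplified stand-in for cadvisor's container.MetricSet.
type MetricSet map[MetricKind]struct{}

func (ms MetricSet) Has(mk MetricKind) bool {
	_, ok := ms[mk]
	return ok
}

func main() {
	ignore := MetricSet{ProcessSchedulerMetrics: {}}
	if !ignore.Has(ProcessSchedulerMetrics) {
		fmt.Println("collecting schedstat")
	} else {
		fmt.Println("scheduler metrics disabled")
	}
}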
vendor/github.com/google/cadvisor/container/libcontainer/BUILD (5 changes; generated, vendored)
@@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
 
 go_library(
     name = "go_default_library",
-    srcs = ["helpers.go"],
+    srcs = [
+        "handler.go",
+        "helpers.go",
+    ],
     cgo = True,
     importpath = "github.com/google/cadvisor/container/libcontainer",
     visibility = ["//visibility:public"],
vendor/github.com/google/cadvisor/container/libcontainer/handler.go (new file, 566 lines; generated, vendored)
@@ -0,0 +1,566 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package libcontainer

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/google/cadvisor/container"
	info "github.com/google/cadvisor/info/v1"

	"bytes"

	"github.com/golang/glog"
	"github.com/opencontainers/runc/libcontainer"
	"github.com/opencontainers/runc/libcontainer/cgroups"
)

/*
#include <unistd.h>
*/
import "C"

type Handler struct {
	cgroupManager   cgroups.Manager
	rootFs          string
	pid             int
	ignoreMetrics   container.MetricSet
	pidMetricsCache map[int]*info.CpuSchedstat
}

func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) *Handler {
	return &Handler{
		cgroupManager:   cgroupManager,
		rootFs:          rootFs,
		pid:             pid,
		ignoreMetrics:   ignoreMetrics,
		pidMetricsCache: make(map[int]*info.CpuSchedstat),
	}
}

// Get cgroup and networking stats of the specified container
func (h *Handler) GetStats() (*info.ContainerStats, error) {
	cgroupStats, err := h.cgroupManager.GetStats()
	if err != nil {
		return nil, err
	}
	libcontainerStats := &libcontainer.Stats{
		CgroupStats: cgroupStats,
	}
	withPerCPU := !h.ignoreMetrics.Has(container.PerCpuUsageMetrics)
	stats := newContainerStats(libcontainerStats, withPerCPU)

	if !h.ignoreMetrics.Has(container.ProcessSchedulerMetrics) {
		pids, err := h.cgroupManager.GetAllPids()
		if err != nil {
			glog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
		} else {
			stats.Cpu.Schedstat, err = schedulerStatsFromProcs(h.rootFs, pids, h.pidMetricsCache)
			if err != nil {
				glog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
			}
		}
	}

	// If we know the pid then get network stats from /proc/<pid>/net/dev
	if h.pid == 0 {
		return stats, nil
	}
	if !h.ignoreMetrics.Has(container.NetworkUsageMetrics) {
		netStats, err := networkStatsFromProc(h.rootFs, h.pid)
		if err != nil {
			glog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
		}
	}
	if !h.ignoreMetrics.Has(container.NetworkTcpUsageMetrics) {
		t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
		if err != nil {
			glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Tcp = t
		}

		t6, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp6")
		if err != nil {
			glog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Tcp6 = t6
		}
	}
	if !h.ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
		u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
		if err != nil {
			glog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Udp = u
		}

		u6, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp6")
		if err != nil {
			glog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Udp6 = u6
		}
	}

	// For backwards compatibility.
	if len(stats.Network.Interfaces) > 0 {
		stats.Network.InterfaceStats = stats.Network.Interfaces[0]
	}

	return stats, nil
}

func schedulerStatsFromProcs(rootFs string, pids []int, pidMetricsCache map[int]*info.CpuSchedstat) (info.CpuSchedstat, error) {
	for _, pid := range pids {
		f, err := os.Open(path.Join(rootFs, "proc", strconv.Itoa(pid), "schedstat"))
		if err != nil {
			return info.CpuSchedstat{}, fmt.Errorf("couldn't open scheduler statistics for process %d: %v", pid, err)
		}
		defer f.Close()
		contents, err := ioutil.ReadAll(f)
		if err != nil {
			return info.CpuSchedstat{}, fmt.Errorf("couldn't read scheduler statistics for process %d: %v", pid, err)
		}
		rawMetrics := bytes.Split(bytes.TrimRight(contents, "\n"), []byte(" "))
		if len(rawMetrics) != 3 {
			return info.CpuSchedstat{}, fmt.Errorf("unexpected number of metrics in schedstat file for process %d", pid)
		}
		cacheEntry, ok := pidMetricsCache[pid]
		if !ok {
			cacheEntry = &info.CpuSchedstat{}
			pidMetricsCache[pid] = cacheEntry
		}
		for i, rawMetric := range rawMetrics {
			metric, err := strconv.ParseUint(string(rawMetric), 10, 64)
			if err != nil {
				return info.CpuSchedstat{}, fmt.Errorf("parsing error while reading scheduler statistics for process: %d: %v", pid, err)
			}
			switch i {
			case 0:
				cacheEntry.RunTime = metric
			case 1:
				cacheEntry.RunqueueTime = metric
			case 2:
				cacheEntry.RunPeriods = metric
			}
		}
	}
	schedstats := info.CpuSchedstat{}
	for _, v := range pidMetricsCache {
		schedstats.RunPeriods += v.RunPeriods
		schedstats.RunqueueTime += v.RunqueueTime
		schedstats.RunTime += v.RunTime
	}
	return schedstats, nil
}

func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) {
	netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev")

	ifaceStats, err := scanInterfaceStats(netStatsFile)
	if err != nil {
		return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err)
	}

	return ifaceStats, nil
}

var (
	ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
)

func isIgnoredDevice(ifName string) bool {
	for _, prefix := range ignoredDevicePrefixes {
		if strings.HasPrefix(strings.ToLower(ifName), prefix) {
			return true
		}
	}
	return false
}

func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
	file, err := os.Open(netStatsFile)
	if err != nil {
		return nil, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)

	// Discard header lines
	for i := 0; i < 2; i++ {
		if b := scanner.Scan(); !b {
			return nil, scanner.Err()
		}
	}

	stats := []info.InterfaceStats{}
	for scanner.Scan() {
		line := scanner.Text()
		line = strings.Replace(line, ":", "", -1)

		fields := strings.Fields(line)
		// If the format of the line is invalid then don't trust any of the stats
		// in this file.
		if len(fields) != 17 {
			return nil, fmt.Errorf("invalid interface stats line: %v", line)
		}

		devName := fields[0]
		if isIgnoredDevice(devName) {
			continue
		}

		i := info.InterfaceStats{
			Name: devName,
		}

		statFields := append(fields[1:5], fields[9:13]...)
		statPointers := []*uint64{
			&i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped,
			&i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped,
		}

		err := setInterfaceStatValues(statFields, statPointers)
		if err != nil {
			return nil, fmt.Errorf("cannot parse interface stats (%v): %v", err, line)
		}

		stats = append(stats, i)
	}

	return stats, nil
}

func setInterfaceStatValues(fields []string, pointers []*uint64) error {
	for i, v := range fields {
		val, err := strconv.ParseUint(v, 10, 64)
		if err != nil {
			return err
		}
		*pointers[i] = val
	}
	return nil
}

func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
	tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

	tcpStats, err := scanTcpStats(tcpStatsFile)
	if err != nil {
		return tcpStats, fmt.Errorf("couldn't read tcp stats: %v", err)
	}

	return tcpStats, nil
}

func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {

	var stats info.TcpStat

	data, err := ioutil.ReadFile(tcpStatsFile)
	if err != nil {
		return stats, fmt.Errorf("failure opening %s: %v", tcpStatsFile, err)
	}

	tcpStateMap := map[string]uint64{
		"01": 0, //ESTABLISHED
		"02": 0, //SYN_SENT
		"03": 0, //SYN_RECV
		"04": 0, //FIN_WAIT1
		"05": 0, //FIN_WAIT2
		"06": 0, //TIME_WAIT
		"07": 0, //CLOSE
		"08": 0, //CLOSE_WAIT
		"09": 0, //LAST_ACK
		"0A": 0, //LISTEN
		"0B": 0, //CLOSING
	}

	reader := strings.NewReader(string(data))
	scanner := bufio.NewScanner(reader)

	scanner.Split(bufio.ScanLines)

	// Discard header line
	if b := scanner.Scan(); !b {
		return stats, scanner.Err()
	}

	for scanner.Scan() {
		line := scanner.Text()

		state := strings.Fields(line)
		// TCP state is the 4th field.
		// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
		tcpState := state[3]
		_, ok := tcpStateMap[tcpState]
		if !ok {
			return stats, fmt.Errorf("invalid TCP stats line: %v", line)
		}
		tcpStateMap[tcpState]++
	}

	stats = info.TcpStat{
		Established: tcpStateMap["01"],
		SynSent:     tcpStateMap["02"],
		SynRecv:     tcpStateMap["03"],
		FinWait1:    tcpStateMap["04"],
		FinWait2:    tcpStateMap["05"],
		TimeWait:    tcpStateMap["06"],
		Close:       tcpStateMap["07"],
		CloseWait:   tcpStateMap["08"],
		LastAck:     tcpStateMap["09"],
		Listen:      tcpStateMap["0A"],
		Closing:     tcpStateMap["0B"],
	}

	return stats, nil
}

func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
	var err error
	var udpStats info.UdpStat

	udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

	r, err := os.Open(udpStatsFile)
	if err != nil {
		return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
	}

	udpStats, err = scanUdpStats(r)
	if err != nil {
		return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
	}

	return udpStats, nil
}

func scanUdpStats(r io.Reader) (info.UdpStat, error) {
	var stats info.UdpStat

	scanner := bufio.NewScanner(r)
	scanner.Split(bufio.ScanLines)

	// Discard header line
	if b := scanner.Scan(); !b {
		return stats, scanner.Err()
	}

	listening := uint64(0)
	dropped := uint64(0)
	rxQueued := uint64(0)
	txQueued := uint64(0)

	for scanner.Scan() {
		line := scanner.Text()
		// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops

		listening++

		fs := strings.Fields(line)
		if len(fs) != 13 {
			continue
		}

		rx, tx := uint64(0), uint64(0)
		fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
		rxQueued += rx
		txQueued += tx

		d, err := strconv.Atoi(string(fs[12]))
		if err != nil {
			continue
		}
		dropped += uint64(d)
	}

	stats = info.UdpStat{
		Listen:   listening,
		Dropped:  dropped,
		RxQueued: rxQueued,
		TxQueued: txQueued,
	}

	return stats, nil
}

func (h *Handler) GetProcesses() ([]int, error) {
	pids, err := h.cgroupManager.GetPids()
	if err != nil {
		return nil, err
	}
	return pids, nil
}

func minUint32(x, y uint32) uint32 {
	if x < y {
		return x
	}
	return y
}

// var to allow unit tests to stub it out
var numCpusFunc = getNumberOnlineCPUs

// Convert libcontainer stats to info.ContainerStats.
func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
	ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
	ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
	ret.Cpu.Usage.Total = s.CpuStats.CpuUsage.TotalUsage
	ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
	ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
	ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime

	if !withPerCPU {
		return
	}
	if len(s.CpuStats.CpuUsage.PercpuUsage) == 0 {
		// libcontainer's 'GetStats' can leave 'PercpuUsage' nil if it skipped the
		// cpuacct subsystem.
		return
	}

	numPossible := uint32(len(s.CpuStats.CpuUsage.PercpuUsage))
	// Note that as of https://patchwork.kernel.org/patch/8607101/ (kernel v4.7),
	// the percpu usage information includes extra zero values for all additional
	// possible CPUs. This is to allow statistic collection after CPU-hotplug.
	// We intentionally ignore these extra zeroes.
	numActual, err := numCpusFunc()
	if err != nil {
		glog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
		numActual = numPossible
	}
	if numActual > numPossible {
		// The real number of cores should never be greater than the number of
		// datapoints reported in cpu usage.
		glog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
	}
	numActual = minUint32(numPossible, numActual)
	ret.Cpu.Usage.PerCpu = make([]uint64, numActual)

	for i := uint32(0); i < numActual; i++ {
		ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
	}

}

// Copied from
// https://github.com/moby/moby/blob/8b1adf55c2af329a4334f21d9444d6a169000c81/daemon/stats/collector_unix.go#L73
// Apache 2.0, Copyright Docker, Inc.
func getNumberOnlineCPUs() (uint32, error) {
	i, err := C.sysconf(C._SC_NPROCESSORS_ONLN)
	// According to POSIX - errno is undefined after successful
	// sysconf, and can be non-zero in several cases, so look for
	// error in returned value not in errno.
	// (https://sourceware.org/bugzilla/show_bug.cgi?id=21536)
	if i == -1 {
		return 0, err
	}
	return uint32(i), nil
}

func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
	ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
	ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
	ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
	ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
	ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
	ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
	ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
}

func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.Memory.Usage = s.MemoryStats.Usage.Usage
	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
	ret.Memory.Cache = s.MemoryStats.Stats["cache"]

	if s.MemoryStats.UseHierarchy {
		ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
		ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
	} else {
		ret.Memory.RSS = s.MemoryStats.Stats["rss"]
		ret.Memory.Swap = s.MemoryStats.Stats["swap"]
	}
	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
		ret.Memory.ContainerData.Pgfault = v
		ret.Memory.HierarchicalData.Pgfault = v
	}
	if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
		ret.Memory.ContainerData.Pgmajfault = v
		ret.Memory.HierarchicalData.Pgmajfault = v
	}

	workingSet := ret.Memory.Usage
	if v, ok := s.MemoryStats.Stats["total_inactive_file"]; ok {
		if workingSet < v {
			workingSet = 0
		} else {
			workingSet -= v
		}
	}
	ret.Memory.WorkingSet = workingSet
}

func setNetworkStats(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
	ret.Network.Interfaces = make([]info.InterfaceStats, len(libcontainerStats.Interfaces))
	for i := range libcontainerStats.Interfaces {
		ret.Network.Interfaces[i] = info.InterfaceStats{
			Name:      libcontainerStats.Interfaces[i].Name,
			RxBytes:   libcontainerStats.Interfaces[i].RxBytes,
			RxPackets: libcontainerStats.Interfaces[i].RxPackets,
			RxErrors:  libcontainerStats.Interfaces[i].RxErrors,
			RxDropped: libcontainerStats.Interfaces[i].RxDropped,
			TxBytes:   libcontainerStats.Interfaces[i].TxBytes,
			TxPackets: libcontainerStats.Interfaces[i].TxPackets,
			TxErrors:  libcontainerStats.Interfaces[i].TxErrors,
			TxDropped: libcontainerStats.Interfaces[i].TxDropped,
		}
	}

	// Add to base struct for backwards compatibility.
	if len(ret.Network.Interfaces) > 0 {
		ret.Network.InterfaceStats = ret.Network.Interfaces[0]
	}
}

func newContainerStats(libcontainerStats *libcontainer.Stats, withPerCPU bool) *info.ContainerStats {
	ret := &info.ContainerStats{
		Timestamp: time.Now(),
	}

	if s := libcontainerStats.CgroupStats; s != nil {
		setCpuStats(s, ret, withPerCPU)
		setDiskIoStats(s, ret)
		setMemoryStats(s, ret)
	}
	if len(libcontainerStats.Interfaces) > 0 {
		setNetworkStats(libcontainerStats, ret)
	}
	return ret
}
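Note: /proc/<pid>/schedstat holds exactly three space-separated counters (time spent on the cpu, time spent waiting on a runqueue, and number of run periods), which is what schedulerStatsFromProcs reads and aggregates across a container's PIDs. A standalone, Linux-only sketch of the same parse:

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// readSchedstat parses /proc/<pid>/schedstat: "<run_ns> <wait_ns> <periods>".
func readSchedstat(pid int) (run, wait, periods uint64, err error) {
	b, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/schedstat", pid))
	if err != nil {
		return 0, 0, 0, err
	}
	fields := strings.Fields(strings.TrimSpace(string(b)))
	if len(fields) != 3 {
		return 0, 0, 0, fmt.Errorf("unexpected schedstat format: %q", b)
	}
	vals := make([]uint64, 3)
	for i, f := range fields {
		if vals[i], err = strconv.ParseUint(f, 10, 64); err != nil {
			return 0, 0, 0, err
		}
	}
	return vals[0], vals[1], vals[2], nil
}

func main() {
	run, wait, periods, err := readSchedstat(1)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("run=%dns wait=%dns periods=%d\n", run, wait, periods)
}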
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go (467 changes; generated, vendored)
@@ -15,29 +15,14 @@
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
/*
|
||||
#include <unistd.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
type CgroupSubsystems struct {
|
||||
// Cgroup subsystem mounts.
// e.g.: "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"]
@@ -104,309 +89,6 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
    "devices": {},
}

// Get cgroup and networking stats of the specified container
func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) (*info.ContainerStats, error) {
    cgroupStats, err := cgroupManager.GetStats()
    if err != nil {
        return nil, err
    }
    libcontainerStats := &libcontainer.Stats{
        CgroupStats: cgroupStats,
    }
    withPerCPU := !ignoreMetrics.Has(container.PerCpuUsageMetrics)
    stats := newContainerStats(libcontainerStats, withPerCPU)

    // If we know the pid then get network stats from /proc/<pid>/net/dev
    if pid == 0 {
        return stats, nil
    }
    if !ignoreMetrics.Has(container.NetworkUsageMetrics) {
        netStats, err := networkStatsFromProc(rootFs, pid)
        if err != nil {
            glog.V(4).Infof("Unable to get network stats from pid %d: %v", pid, err)
        } else {
            stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
        }
    }
    if !ignoreMetrics.Has(container.NetworkTcpUsageMetrics) {
        t, err := tcpStatsFromProc(rootFs, pid, "net/tcp")
        if err != nil {
            glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", pid, err)
        } else {
            stats.Network.Tcp = t
        }

        t6, err := tcpStatsFromProc(rootFs, pid, "net/tcp6")
        if err != nil {
            glog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", pid, err)
        } else {
            stats.Network.Tcp6 = t6
        }
    }
    if !ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
        u, err := udpStatsFromProc(rootFs, pid, "net/udp")
        if err != nil {
            glog.V(4).Infof("Unable to get udp stats from pid %d: %v", pid, err)
        } else {
            stats.Network.Udp = u
        }

        u6, err := udpStatsFromProc(rootFs, pid, "net/udp6")
        if err != nil {
            glog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", pid, err)
        } else {
            stats.Network.Udp6 = u6
        }
    }

    // For backwards compatibility.
    if len(stats.Network.Interfaces) > 0 {
        stats.Network.InterfaceStats = stats.Network.Interfaces[0]
    }

    return stats, nil
}

func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) {
    netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev")

    ifaceStats, err := scanInterfaceStats(netStatsFile)
    if err != nil {
        return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err)
    }

    return ifaceStats, nil
}

var (
    ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
)

func isIgnoredDevice(ifName string) bool {
    for _, prefix := range ignoredDevicePrefixes {
        if strings.HasPrefix(strings.ToLower(ifName), prefix) {
            return true
        }
    }
    return false
}

func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
    file, err := os.Open(netStatsFile)
    if err != nil {
        return nil, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
    }
    defer file.Close()

    scanner := bufio.NewScanner(file)

    // Discard header lines
    for i := 0; i < 2; i++ {
        if b := scanner.Scan(); !b {
            return nil, scanner.Err()
        }
    }

    stats := []info.InterfaceStats{}
    for scanner.Scan() {
        line := scanner.Text()
        line = strings.Replace(line, ":", "", -1)

        fields := strings.Fields(line)
        // If the format of the line is invalid then don't trust any of the stats
        // in this file.
        if len(fields) != 17 {
            return nil, fmt.Errorf("invalid interface stats line: %v", line)
        }

        devName := fields[0]
        if isIgnoredDevice(devName) {
            continue
        }

        i := info.InterfaceStats{
            Name: devName,
        }

        statFields := append(fields[1:5], fields[9:13]...)
        statPointers := []*uint64{
            &i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped,
            &i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped,
        }

        err := setInterfaceStatValues(statFields, statPointers)
        if err != nil {
            return nil, fmt.Errorf("cannot parse interface stats (%v): %v", err, line)
        }

        stats = append(stats, i)
    }

    return stats, nil
}

func setInterfaceStatValues(fields []string, pointers []*uint64) error {
    for i, v := range fields {
        val, err := strconv.ParseUint(v, 10, 64)
        if err != nil {
            return err
        }
        *pointers[i] = val
    }
    return nil
}
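For orientation, a standalone sketch (not part of the vendored code; the sample line is fabricated) of the /proc/net/dev parse that scanInterfaceStats performs: drop the interface-name colon, require exactly 17 whitespace-separated fields, and take rx stats from fields 1-4 and tx stats from fields 9-12.

package main

import (
    "fmt"
    "strings"
)

func main() {
    // One fabricated /proc/net/dev row: name, 8 rx columns, 8 tx columns.
    line := "  eth0: 1000 10 0 0 0 0 0 0 2000 20 0 0 0 0 0 0"
    fields := strings.Fields(strings.Replace(line, ":", "", -1))
    fmt.Println(len(fields))               // 17
    fmt.Println(fields[1:5], fields[9:13]) // [1000 10 0 0] [2000 20 0 0]
}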
func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
    tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

    tcpStats, err := scanTcpStats(tcpStatsFile)
    if err != nil {
        return tcpStats, fmt.Errorf("couldn't read tcp stats: %v", err)
    }

    return tcpStats, nil
}

func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {

    var stats info.TcpStat

    data, err := ioutil.ReadFile(tcpStatsFile)
    if err != nil {
        return stats, fmt.Errorf("failure opening %s: %v", tcpStatsFile, err)
    }

    tcpStateMap := map[string]uint64{
        "01": 0, //ESTABLISHED
        "02": 0, //SYN_SENT
        "03": 0, //SYN_RECV
        "04": 0, //FIN_WAIT1
        "05": 0, //FIN_WAIT2
        "06": 0, //TIME_WAIT
        "07": 0, //CLOSE
        "08": 0, //CLOSE_WAIT
        "09": 0, //LAST_ACK
        "0A": 0, //LISTEN
        "0B": 0, //CLOSING
    }

    reader := strings.NewReader(string(data))
    scanner := bufio.NewScanner(reader)

    scanner.Split(bufio.ScanLines)

    // Discard header line
    if b := scanner.Scan(); !b {
        return stats, scanner.Err()
    }

    for scanner.Scan() {
        line := scanner.Text()

        state := strings.Fields(line)
        // TCP state is the 4th field.
        // Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
        tcpState := state[3]
        _, ok := tcpStateMap[tcpState]
        if !ok {
            return stats, fmt.Errorf("invalid TCP stats line: %v", line)
        }
        tcpStateMap[tcpState]++
    }

    stats = info.TcpStat{
        Established: tcpStateMap["01"],
        SynSent:     tcpStateMap["02"],
        SynRecv:     tcpStateMap["03"],
        FinWait1:    tcpStateMap["04"],
        FinWait2:    tcpStateMap["05"],
        TimeWait:    tcpStateMap["06"],
        Close:       tcpStateMap["07"],
        CloseWait:   tcpStateMap["08"],
        LastAck:     tcpStateMap["09"],
        Listen:      tcpStateMap["0A"],
        Closing:     tcpStateMap["0B"],
    }

    return stats, nil
}
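A standalone sketch of the state counting scanTcpStats applies to a /proc/net/tcp-style table (the two rows below are fabricated): the hex "st" column is the 4th whitespace field, and each valid value bumps a per-state counter.

package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    data := `  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
   0: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345
   1: 0100007F:0050 0100007F:9C40 01 00000000:00000000 00:00000000 00000000     0        0 12346`

    counts := map[string]uint64{}
    scanner := bufio.NewScanner(strings.NewReader(data))
    scanner.Scan() // discard the header line
    for scanner.Scan() {
        counts[strings.Fields(scanner.Text())[3]]++ // "st" is the 4th field
    }
    fmt.Println(counts) // map[01:1 0A:1]: one ESTABLISHED, one LISTEN
}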
func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
    var err error
    var udpStats info.UdpStat

    udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

    r, err := os.Open(udpStatsFile)
    if err != nil {
        return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
    }

    udpStats, err = scanUdpStats(r)
    if err != nil {
        return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
    }

    return udpStats, nil
}

func scanUdpStats(r io.Reader) (info.UdpStat, error) {
    var stats info.UdpStat

    scanner := bufio.NewScanner(r)
    scanner.Split(bufio.ScanLines)

    // Discard header line
    if b := scanner.Scan(); !b {
        return stats, scanner.Err()
    }

    listening := uint64(0)
    dropped := uint64(0)
    rxQueued := uint64(0)
    txQueued := uint64(0)

    for scanner.Scan() {
        line := scanner.Text()
        // Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops

        listening++

        fs := strings.Fields(line)
        if len(fs) != 13 {
            continue
        }

        rx, tx := uint64(0), uint64(0)
        fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
        rxQueued += rx
        txQueued += tx

        d, err := strconv.Atoi(string(fs[12]))
        if err != nil {
            continue
        }
        dropped += uint64(d)
    }

    stats = info.UdpStat{
        Listen:   listening,
        Dropped:  dropped,
        RxQueued: rxQueued,
        TxQueued: txQueued,
    }

    return stats, nil
}
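The tx_queue:rx_queue column that scanUdpStats reads is a pair of hex numbers joined by a colon; fmt.Sscanf with "%X:%X" splits it, as this standalone snippet shows.

package main

import "fmt"

func main() {
    rx, tx := uint64(0), uint64(0)
    fmt.Sscanf("00000000:00001234", "%X:%X", &rx, &tx)
    fmt.Println(rx, tx) // 0 4660 (0x1234)
}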
func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
    pids, err := cgroupManager.GetPids()
    if err != nil {
        return nil, err
    }
    return pids, nil
}

func DiskStatsCopy0(major, minor uint64) *info.PerDiskStats {
    disk := info.PerDiskStats{
        Major: major,
@@ -456,152 +138,3 @@ func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskSta
    }
    return DiskStatsCopy1(disk_stat)
}

func minUint32(x, y uint32) uint32 {
    if x < y {
        return x
    }
    return y
}

// var to allow unit tests to stub it out
var numCpusFunc = getNumberOnlineCPUs

// Convert libcontainer stats to info.ContainerStats.
func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
    ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
    ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
    ret.Cpu.Usage.Total = s.CpuStats.CpuUsage.TotalUsage
    ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
    ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
    ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime

    if !withPerCPU {
        return
    }
    if len(s.CpuStats.CpuUsage.PercpuUsage) == 0 {
        // libcontainer's 'GetStats' can leave 'PercpuUsage' nil if it skipped the
        // cpuacct subsystem.
        return
    }

    numPossible := uint32(len(s.CpuStats.CpuUsage.PercpuUsage))
    // Note that as of https://patchwork.kernel.org/patch/8607101/ (kernel v4.7),
    // the percpu usage information includes extra zero values for all additional
    // possible CPUs. This is to allow statistic collection after CPU-hotplug.
    // We intentionally ignore these extra zeroes.
    numActual, err := numCpusFunc()
    if err != nil {
        glog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
        numActual = numPossible
    }
    if numActual > numPossible {
        // The real number of cores should never be greater than the number of
        // datapoints reported in cpu usage.
        glog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
    }
    numActual = minUint32(numPossible, numActual)
    ret.Cpu.Usage.PerCpu = make([]uint64, numActual)

    for i := uint32(0); i < numActual; i++ {
        ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
    }

}
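The kernel-4.7 note above is why the minUint32 clamp exists: PercpuUsage is padded with zeros up to the number of possible CPUs, so setCpuStats copies only the first numActual entries. A standalone sketch of that trim, with cadvisor's types omitted:

package main

import "fmt"

func trimPerCpu(percpu []uint64, numOnline uint32) []uint64 {
    numPossible := uint32(len(percpu))
    if numOnline > numPossible {
        numOnline = numPossible // never read past the reported datapoints
    }
    out := make([]uint64, numOnline)
    copy(out, percpu[:numOnline])
    return out
}

func main() {
    // 4 possible CPUs, 2 online: the trailing hotplug zeros are dropped.
    fmt.Println(trimPerCpu([]uint64{100, 200, 0, 0}, 2)) // [100 200]
}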
// Copied from
// https://github.com/moby/moby/blob/8b1adf55c2af329a4334f21d9444d6a169000c81/daemon/stats/collector_unix.go#L73
// Apache 2.0, Copyright Docker, Inc.
func getNumberOnlineCPUs() (uint32, error) {
    i, err := C.sysconf(C._SC_NPROCESSORS_ONLN)
    // According to POSIX - errno is undefined after successful
    // sysconf, and can be non-zero in several cases, so look for
    // error in returned value not in errno.
    // (https://sourceware.org/bugzilla/show_bug.cgi?id=21536)
    if i == -1 {
        return 0, err
    }
    return uint32(i), nil
}

func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
    ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
    ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
    ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
    ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
    ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
    ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
    ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
    ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
}

func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
    ret.Memory.Usage = s.MemoryStats.Usage.Usage
    ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
    ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
    ret.Memory.Cache = s.MemoryStats.Stats["cache"]

    if s.MemoryStats.UseHierarchy {
        ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
        ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
    } else {
        ret.Memory.RSS = s.MemoryStats.Stats["rss"]
        ret.Memory.Swap = s.MemoryStats.Stats["swap"]
    }
    if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
        ret.Memory.ContainerData.Pgfault = v
        ret.Memory.HierarchicalData.Pgfault = v
    }
    if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
        ret.Memory.ContainerData.Pgmajfault = v
        ret.Memory.HierarchicalData.Pgmajfault = v
    }

    workingSet := ret.Memory.Usage
    if v, ok := s.MemoryStats.Stats["total_inactive_file"]; ok {
        if workingSet < v {
            workingSet = 0
        } else {
            workingSet -= v
        }
    }
    ret.Memory.WorkingSet = workingSet
}
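The working-set computation in setMemoryStats subtracts the inactive file cache from usage and clamps at zero, so the unsigned subtraction cannot wrap. A standalone check of that arithmetic:

package main

import "fmt"

func workingSet(usage, totalInactiveFile uint64) uint64 {
    if usage < totalInactiveFile {
        return 0 // clamp instead of underflowing the uint64
    }
    return usage - totalInactiveFile
}

func main() {
    fmt.Println(workingSet(1024, 256)) // 768
    fmt.Println(workingSet(100, 256))  // 0
}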
func setNetworkStats(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
    ret.Network.Interfaces = make([]info.InterfaceStats, len(libcontainerStats.Interfaces))
    for i := range libcontainerStats.Interfaces {
        ret.Network.Interfaces[i] = info.InterfaceStats{
            Name:      libcontainerStats.Interfaces[i].Name,
            RxBytes:   libcontainerStats.Interfaces[i].RxBytes,
            RxPackets: libcontainerStats.Interfaces[i].RxPackets,
            RxErrors:  libcontainerStats.Interfaces[i].RxErrors,
            RxDropped: libcontainerStats.Interfaces[i].RxDropped,
            TxBytes:   libcontainerStats.Interfaces[i].TxBytes,
            TxPackets: libcontainerStats.Interfaces[i].TxPackets,
            TxErrors:  libcontainerStats.Interfaces[i].TxErrors,
            TxDropped: libcontainerStats.Interfaces[i].TxDropped,
        }
    }

    // Add to base struct for backwards compatibility.
    if len(ret.Network.Interfaces) > 0 {
        ret.Network.InterfaceStats = ret.Network.Interfaces[0]
    }
}

func newContainerStats(libcontainerStats *libcontainer.Stats, withPerCPU bool) *info.ContainerStats {
    ret := &info.ContainerStats{
        Timestamp: time.Now(),
    }

    if s := libcontainerStats.CgroupStats; s != nil {
        setCpuStats(s, ret, withPerCPU)
        setDiskIoStats(s, ret)
        setMemoryStats(s, ret)
    }
    if len(libcontainerStats.Interfaces) > 0 {
        setNetworkStats(libcontainerStats, ret)
    }
    return ret
}
1 vendor/github.com/google/cadvisor/container/raw/BUILD generated vendored
@@ -17,7 +17,6 @@ go_library(
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/machine:go_default_library",
        "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
    ],
6 vendor/github.com/google/cadvisor/container/raw/factory.go generated vendored
@@ -40,8 +40,8 @@ type rawFactory struct {
    // Information about mounted filesystems.
    fsInfo fs.FsInfo

    // Watcher for inotify events.
    watcher *common.InotifyWatcher
    // Watcher for fsnotify events.
    watcher *common.FsnotifyWatcher

    // List of metrics to be ignored.
    ignoreMetrics map[container.MetricKind]struct{}
@@ -78,7 +78,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno
        return fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
    }

    watcher, err := common.NewInotifyWatcher()
    watcher, err := common.NewFsnotifyWatcher()
    if err != nil {
        return err
    }
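The factory now builds on github.com/fsnotify/fsnotify rather than golang.org/x/exp/inotify. A minimal sketch of that library's channel-based API, which NewFsnotifyWatcher wraps; the watched path here is illustrative only.

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    w, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    // Watch one cgroup hierarchy; events arrive on channels rather than
    // through a mask-based read loop as with inotify.
    if err := w.Add("/sys/fs/cgroup/cpu"); err != nil {
        log.Fatal(err)
    }
    for {
        select {
        case ev := <-w.Events:
            log.Printf("op=%v name=%q", ev.Op, ev.Name)
        case err := <-w.Errors:
            log.Println("watch error:", err)
        }
    }
}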
36 vendor/github.com/google/cadvisor/container/raw/handler.go generated vendored
@@ -26,7 +26,6 @@ import (
    "github.com/google/cadvisor/machine"

    "github.com/golang/glog"
    "github.com/opencontainers/runc/libcontainer/cgroups"
    cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
    "github.com/opencontainers/runc/libcontainer/configs"
)
@@ -34,32 +33,23 @@ import (
type rawContainerHandler struct {
    // Name of the container for this handler.
    name string
    cgroupSubsystems *libcontainer.CgroupSubsystems
    machineInfoFactory info.MachineInfoFactory

    // Absolute path to the cgroup hierarchies of this container.
    // (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
    cgroupPaths map[string]string

    // Manager of this container's cgroups.
    cgroupManager cgroups.Manager

    fsInfo fs.FsInfo
    externalMounts []common.Mount

    rootFs string

    // Metrics to be ignored.
    ignoreMetrics container.MetricSet

    pid int
    libcontainerHandler *libcontainer.Handler
}

func isRootCgroup(name string) bool {
    return name == "/"
}

func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.FsnotifyWatcher, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
    cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)

    cHints, err := common.GetContainerHintsFromFile(*common.ArgContainerHints)
@@ -88,17 +78,15 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
        pid = 1
    }

    handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)

    return &rawContainerHandler{
        name: name,
        cgroupSubsystems: cgroupSubsystems,
        machineInfoFactory: machineInfoFactory,
        cgroupPaths: cgroupPaths,
        cgroupManager: cgroupManager,
        fsInfo: fsInfo,
        externalMounts: externalMounts,
        rootFs: rootFs,
        ignoreMetrics: ignoreMetrics,
        pid: pid,
        name: name,
        machineInfoFactory: machineInfoFactory,
        cgroupPaths: cgroupPaths,
        fsInfo: fsInfo,
        externalMounts: externalMounts,
        libcontainerHandler: handler,
    }, nil
}

@@ -231,7 +219,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
}

func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
    stats, err := libcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
    stats, err := self.libcontainerHandler.GetStats()
    if err != nil {
        return stats, err
    }
@@ -267,7 +255,7 @@ func (self *rawContainerHandler) ListContainers(listType container.ListType) ([]
}

func (self *rawContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
    return libcontainer.GetProcesses(self.cgroupManager)
    return self.libcontainerHandler.GetProcesses()
}

func (self *rawContainerHandler) Exists() bool {
1 vendor/github.com/google/cadvisor/container/rkt/BUILD generated vendored
@@ -20,7 +20,6 @@ go_library(
        "//vendor/github.com/google/cadvisor/fs:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
79 vendor/github.com/google/cadvisor/container/rkt/handler.go generated vendored
@@ -28,48 +28,35 @@ import (
    "golang.org/x/net/context"

    "github.com/golang/glog"
    "github.com/opencontainers/runc/libcontainer/cgroups"
    cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
    "github.com/opencontainers/runc/libcontainer/configs"
)

type rktContainerHandler struct {
    rktClient rktapi.PublicAPIClient
    // Name of the container for this handler.
    name string
    cgroupSubsystems *libcontainer.CgroupSubsystems
    machineInfoFactory info.MachineInfoFactory

    // Absolute path to the cgroup hierarchies of this container.
    // (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
    cgroupPaths map[string]string

    // Manager of this container's cgroups.
    cgroupManager cgroups.Manager

    // Whether this container has network isolation enabled.
    hasNetwork bool

    fsInfo fs.FsInfo

    rootFs string

    isPod bool

    aliases []string

    pid int

    rootfsStorageDir string

    labels map[string]string

    // Filesystem handler.
    fsHandler common.FsHandler

    ignoreMetrics container.MetricSet

    apiPod *rktapi.Pod

    labels map[string]string

    reference info.ContainerReference

    libcontainerHandler *libcontainer.Handler
}

func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPath string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
@@ -122,30 +109,27 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
        Paths: cgroupPaths,
    }

    hasNetwork := false
    if isPod {
        hasNetwork = true
    }
    libcontainerHandler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)

    rootfsStorageDir := getRootFs(rktPath, parsed)

    containerReference := info.ContainerReference{
        Name:      name,
        Aliases:   aliases,
        Namespace: RktNamespace,
    }

    handler := &rktContainerHandler{
        name: name,
        rktClient: rktClient,
        cgroupSubsystems: cgroupSubsystems,
        machineInfoFactory: machineInfoFactory,
        cgroupPaths: cgroupPaths,
        cgroupManager: cgroupManager,
        fsInfo: fsInfo,
        hasNetwork: hasNetwork,
        rootFs: rootFs,
        isPod: isPod,
        aliases: aliases,
        pid: pid,
        labels: labels,
        rootfsStorageDir: rootfsStorageDir,
        ignoreMetrics: ignoreMetrics,
        apiPod: apiPod,
        machineInfoFactory: machineInfoFactory,
        cgroupPaths: cgroupPaths,
        fsInfo: fsInfo,
        isPod: isPod,
        rootfsStorageDir: rootfsStorageDir,
        ignoreMetrics: ignoreMetrics,
        apiPod: apiPod,
        labels: labels,
        reference: containerReference,
        libcontainerHandler: libcontainerHandler,
    }

    if !ignoreMetrics.Has(container.DiskUsageMetrics) {
@@ -174,12 +158,7 @@ func createLabels(annotations []*rktapi.KeyValue) map[string]string {
}

func (handler *rktContainerHandler) ContainerReference() (info.ContainerReference, error) {
    return info.ContainerReference{
        Name:      handler.name,
        Aliases:   handler.aliases,
        Namespace: RktNamespace,
        Labels:    handler.labels,
    }, nil
    return handler.reference, nil
}

func (handler *rktContainerHandler) Start() {
@@ -191,7 +170,7 @@ func (handler *rktContainerHandler) Cleanup() {
}

func (handler *rktContainerHandler) GetSpec() (info.ContainerSpec, error) {
    hasNetwork := handler.hasNetwork && !handler.ignoreMetrics.Has(container.NetworkUsageMetrics)
    hasNetwork := handler.isPod && !handler.ignoreMetrics.Has(container.NetworkUsageMetrics)
    hasFilesystem := !handler.ignoreMetrics.Has(container.DiskUsageMetrics)

    spec, err := common.GetSpec(handler.cgroupPaths, handler.machineInfoFactory, hasNetwork, hasFilesystem)
@@ -243,7 +222,7 @@ func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error
}

func (handler *rktContainerHandler) GetStats() (*info.ContainerStats, error) {
    stats, err := libcontainer.GetStats(handler.cgroupManager, handler.rootFs, handler.pid, handler.ignoreMetrics)
    stats, err := handler.libcontainerHandler.GetStats()
    if err != nil {
        return stats, err
    }
@@ -275,7 +254,7 @@ func (self *rktContainerHandler) GetContainerIPAddress() string {
func (handler *rktContainerHandler) GetCgroupPath(resource string) (string, error) {
    path, ok := handler.cgroupPaths[resource]
    if !ok {
        return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, handler.name)
        return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, handler.reference.Name)
    }
    return path, nil
}
@@ -285,11 +264,11 @@ func (handler *rktContainerHandler) GetContainerLabels() map[string]string {
}

func (handler *rktContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
    return common.ListContainers(handler.name, handler.cgroupPaths, listType)
    return common.ListContainers(handler.reference.Name, handler.cgroupPaths, listType)
}

func (handler *rktContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
    return libcontainer.GetProcesses(handler.cgroupManager)
    return handler.libcontainerHandler.GetProcesses()
}

func (handler *rktContainerHandler) Exists() bool {
4 vendor/github.com/google/cadvisor/fs/fs.go generated vendored
@@ -554,7 +554,7 @@ func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
    if dir == "" {
        return 0, fmt.Errorf("invalid directory")
    }
    cmd := exec.Command("nice", "-n", "19", "du", "-s", dir)
    cmd := exec.Command("ionice", "-c3", "nice", "-n", "19", "du", "-s", dir)
    stdoutp, err := cmd.StdoutPipe()
    if err != nil {
        return 0, fmt.Errorf("failed to setup stdout for cmd %v - %v", cmd.Args, err)
@@ -601,7 +601,7 @@ func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
    }
    var counter byteCounter
    var stderr bytes.Buffer
    findCmd := exec.Command("find", dir, "-xdev", "-printf", ".")
    findCmd := exec.Command("ionice", "-c3", "nice", "-n", "19", "find", dir, "-xdev", "-printf", ".")
    findCmd.Stdout, findCmd.Stderr = &counter, &stderr
    if err := findCmd.Start(); err != nil {
        return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
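The change above wraps du and find in ionice -c3 (the idle I/O scheduling class) on top of the existing nice -n 19, so the disk scans yield to container workloads. A standalone sketch of the pattern; parsing the leading kilobyte field of du -s output is an assumption about typical GNU du formatting, not code from the diff.

package main

import (
    "fmt"
    "os/exec"
    "strconv"
    "strings"
)

// dirDiskUsageKB runs du at idle I/O and lowest CPU priority and parses
// the leading "<kbytes>\t<dir>" field. Linux-only; requires ionice and du.
func dirDiskUsageKB(dir string) (uint64, error) {
    out, err := exec.Command("ionice", "-c3", "nice", "-n", "19", "du", "-s", dir).Output()
    if err != nil {
        return 0, fmt.Errorf("du failed: %v", err)
    }
    fields := strings.Fields(string(out))
    if len(fields) == 0 {
        return 0, fmt.Errorf("unexpected du output %q", out)
    }
    return strconv.ParseUint(fields[0], 10, 64)
}

func main() {
    usage, err := dirDiskUsageKB("/tmp")
    fmt.Println(usage, err)
}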
19 vendor/github.com/google/cadvisor/info/v1/container.go generated vendored
@@ -85,8 +85,6 @@ type ContainerReference struct {
    // Namespace under which the aliases of a container are unique.
    // An example of a namespace is "docker" for Docker containers.
    Namespace string `json:"namespace,omitempty"`

    Labels map[string]string `json:"labels,omitempty"`
}

// Sorts by container name.
@@ -295,10 +293,23 @@ type CpuCFS struct {
    ThrottledTime uint64 `json:"throttled_time"`
}

// Cpu Aggregated scheduler statistics
type CpuSchedstat struct {
    // https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt

    // time spent on the cpu
    RunTime uint64 `json:"run_time"`
    // time spent waiting on a runqueue
    RunqueueTime uint64 `json:"runqueue_time"`
    // # of timeslices run on this cpu
    RunPeriods uint64 `json:"run_periods"`
}

// All CPU usage metrics are cumulative from the creation of the container
type CpuStats struct {
    Usage CpuUsage `json:"usage"`
    CFS   CpuCFS   `json:"cfs"`
    Usage     CpuUsage     `json:"usage"`
    CFS       CpuCFS       `json:"cfs"`
    Schedstat CpuSchedstat `json:"schedstat"`
    // Smoothed average of number of runnable threads x 1000.
    // We multiply by thousand to avoid using floats, but preserving precision.
    // Load is smoothed over the last 10 seconds. Instantaneous value can be read
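The new CpuSchedstat fields correspond to the three numbers in /proc/<pid>/schedstat (see the sched-stats.txt link above). A standalone sketch of reading them; it assumes a Linux kernel with schedstats available, and pid 1 is just an example.

package main

import (
    "fmt"
    "io/ioutil"
    "strconv"
    "strings"
)

func main() {
    data, err := ioutil.ReadFile("/proc/1/schedstat")
    if err != nil {
        fmt.Println(err)
        return
    }
    fields := strings.Fields(string(data))
    runTime, _ := strconv.ParseUint(fields[0], 10, 64)      // ns spent on the cpu
    runqueueTime, _ := strconv.ParseUint(fields[1], 10, 64) // ns waiting on a runqueue
    runPeriods, _ := strconv.ParseUint(fields[2], 10, 64)   // timeslices run
    fmt.Println(runTime, runqueueTime, runPeriods)
}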
1 vendor/github.com/google/cadvisor/machine/BUILD generated vendored
@@ -9,6 +9,7 @@ go_library(
    importpath = "github.com/google/cadvisor/machine",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/docker/docker/pkg/parsers/operatingsystem:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/google/cadvisor/fs:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
18 vendor/github.com/google/cadvisor/machine/info.go generated vendored
@@ -23,6 +23,7 @@ import (
    "strconv"
    "strings"

    "github.com/docker/docker/pkg/parsers/operatingsystem"
    "github.com/google/cadvisor/fs"
    info "github.com/google/cadvisor/info/v1"
    "github.com/google/cadvisor/utils/cloudinfo"
@@ -173,20 +174,11 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
}

func ContainerOsVersion() string {
    container_os := "Unknown"
    os_release, err := ioutil.ReadFile("/etc/os-release")
    if err == nil {
        // We might be running in a busybox or some hand-crafted image.
        // It's useful to know why cadvisor didn't come up.
        for _, line := range strings.Split(string(os_release), "\n") {
            parsed := strings.Split(line, "\"")
            if len(parsed) == 3 && parsed[0] == "PRETTY_NAME=" {
                container_os = parsed[1]
                break
            }
        }
    os, err := operatingsystem.GetOperatingSystem()
    if err != nil {
        os = "Unknown"
    }
    return container_os
    return os
}

func KernelVersion() string {
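The rewritten ContainerOsVersion delegates /etc/os-release parsing to Docker's operatingsystem helper (the new BUILD dependency above). Used on its own, the replacement logic reduces to:

package main

import (
    "fmt"

    "github.com/docker/docker/pkg/parsers/operatingsystem"
)

func main() {
    os, err := operatingsystem.GetOperatingSystem()
    if err != nil {
        os = "Unknown" // same fallback the patched function uses
    }
    fmt.Println(os)
}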
7 vendor/github.com/google/cadvisor/manager/container.go generated vendored
@@ -615,7 +615,12 @@ func (c *containerData) updateStats() error {
        }
        return err
    }
    err = c.memoryCache.AddStats(ref, stats)

    cInfo := info.ContainerInfo{
        ContainerReference: ref,
    }

    err = c.memoryCache.AddStats(&cInfo, stats)
    if err != nil {
        return err
    }
2 vendor/github.com/google/cadvisor/manager/manager.go generated vendored
@@ -242,7 +242,7 @@ func retryDockerStatus() info.DockerStatus {
    for {
        ctx, _ := context.WithTimeout(context.Background(), startupTimeout)
        dockerStatus, err := docker.StatusWithContext(ctx)
        if err != nil {
        if err == nil {
            return dockerStatus
        }
2 vendor/github.com/google/cadvisor/manager/watcher/raw/BUILD generated vendored
@@ -6,11 +6,11 @@ go_library(
    importpath = "github.com/google/cadvisor/manager/watcher/raw",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/fsnotify/fsnotify:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/google/cadvisor/container/common:go_default_library",
        "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
        "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
        "//vendor/golang.org/x/exp/inotify:go_default_library",
    ],
)
41 vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go generated vendored
@@ -27,8 +27,8 @@ import (
    "github.com/google/cadvisor/container/libcontainer"
    "github.com/google/cadvisor/manager/watcher"

    "github.com/fsnotify/fsnotify"
    "github.com/golang/glog"
    "golang.org/x/exp/inotify"
)

type rawContainerWatcher struct {
@@ -37,8 +37,8 @@ type rawContainerWatcher struct {

    cgroupSubsystems *libcontainer.CgroupSubsystems

    // Inotify event watcher.
    watcher *common.InotifyWatcher
    // Fsnotify event watcher.
    watcher *common.FsnotifyWatcher

    // Signal for watcher thread to stop.
    stopWatcher chan error
@@ -53,7 +53,7 @@ func NewRawContainerWatcher() (watcher.ContainerWatcher, error) {
        return nil, fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
    }

    watcher, err := common.NewInotifyWatcher()
    watcher, err := common.NewFsnotifyWatcher()
    if err != nil {
        return nil, err
    }
@@ -71,7 +71,7 @@ func NewRawContainerWatcher() (watcher.ContainerWatcher, error) {
func (self *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error {
    // Watch this container (all its cgroups) and all subdirectories.
    for _, cgroupPath := range self.cgroupPaths {
        _, err := self.watchDirectory(cgroupPath, "/")
        _, err := self.watchDirectory(events, cgroupPath, "/")
        if err != nil {
            return err
        }
@@ -109,7 +109,7 @@ func (self *rawContainerWatcher) Stop() error {

// Watches the specified directory and all subdirectories. Returns whether the path was
// already being watched and an error (if any).
func (self *rawContainerWatcher) watchDirectory(dir string, containerName string) (bool, error) {
func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEvent, dir string, containerName string) (bool, error) {
    alreadyWatching, err := self.watcher.AddWatch(containerName, dir)
    if err != nil {
        return alreadyWatching, err
@@ -121,7 +121,7 @@ func (self *rawContainerWatcher) watchDirectory(dir string, containerName string
        if cleanup {
            _, err := self.watcher.RemoveWatch(containerName, dir)
            if err != nil {
                glog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
                glog.Warningf("Failed to remove fsnotify watch for %q: %v", dir, err)
            }
        }
    }()
@@ -135,7 +135,8 @@ func (self *rawContainerWatcher) watchDirectory(dir string, containerName string
    for _, entry := range entries {
        if entry.IsDir() {
            entryPath := path.Join(dir, entry.Name())
            _, err = self.watchDirectory(entryPath, path.Join(containerName, entry.Name()))
            subcontainerName := path.Join(containerName, entry.Name())
            alreadyWatchingSubDir, err := self.watchDirectory(events, entryPath, subcontainerName)
            if err != nil {
                glog.Errorf("Failed to watch directory %q: %v", entryPath, err)
                if os.IsNotExist(err) {
@@ -145,6 +146,16 @@ func (self *rawContainerWatcher) watchDirectory(dir string, containerName string
                }
                return alreadyWatching, err
            }
            // since we already missed the creation event for this directory, publish an event here.
            if !alreadyWatchingSubDir {
                go func() {
                    events <- watcher.ContainerEvent{
                        EventType:   watcher.ContainerAdd,
                        Name:        subcontainerName,
                        WatchSource: watcher.Raw,
                    }
                }()
            }
        }
    }

@@ -152,18 +163,16 @@ func (self *rawContainerWatcher) watchDirectory(dir string, containerName string
    return alreadyWatching, nil
}

func (self *rawContainerWatcher) processEvent(event *inotify.Event, events chan watcher.ContainerEvent) error {
    // Convert the inotify event type to a container create or delete.
func (self *rawContainerWatcher) processEvent(event fsnotify.Event, events chan watcher.ContainerEvent) error {
    // Convert the fsnotify event type to a container create or delete.
    var eventType watcher.ContainerEventType
    switch {
    case (event.Mask & inotify.IN_CREATE) > 0:
    case event.Op == fsnotify.Create:
        eventType = watcher.ContainerAdd
    case (event.Mask & inotify.IN_DELETE) > 0:
    case event.Op == fsnotify.Remove:
        eventType = watcher.ContainerDelete
    case (event.Mask & inotify.IN_MOVED_FROM) > 0:
    case event.Op == fsnotify.Rename:
        eventType = watcher.ContainerDelete
    case (event.Mask & inotify.IN_MOVED_TO) > 0:
        eventType = watcher.ContainerAdd
    default:
        // Ignore other events.
        return nil
@@ -186,7 +195,7 @@ func (self *rawContainerWatcher) processEvent(event *inotify.Event, events chan
    switch eventType {
    case watcher.ContainerAdd:
        // New container was created, watch it.
        alreadyWatched, err := self.watchDirectory(event.Name, containerName)
        alreadyWatched, err := self.watchDirectory(events, event.Name, containerName)
        if err != nil {
            return err
        }
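Worth noting in this hunk: fsnotify folds inotify's IN_MOVED_FROM/IN_MOVED_TO pair into a single Rename op, so a rename can only be mapped to a delete; the matching add is recovered by the new "publish an event for directories we started watching late" block in watchDirectory. A standalone sketch of the op mapping, with a local event type standing in for cadvisor's watcher package:

package main

import (
    "fmt"

    "github.com/fsnotify/fsnotify"
)

type containerEventType int

const (
    containerAdd containerEventType = iota
    containerDelete
)

// eventTypeOf mirrors the mapping in processEvent.
func eventTypeOf(op fsnotify.Op) (containerEventType, bool) {
    switch op {
    case fsnotify.Create:
        return containerAdd, true
    case fsnotify.Remove, fsnotify.Rename:
        return containerDelete, true
    default:
        return 0, false // ignore writes, chmods, etc.
    }
}

func main() {
    t, ok := eventTypeOf(fsnotify.Rename)
    fmt.Println(t, ok) // 1 true: a rename is treated as a container delete
}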
21 vendor/github.com/google/cadvisor/metrics/prometheus.go generated vendored
@@ -197,6 +197,27 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
            getValues: func(s *info.ContainerStats) metricValues {
                return metricValues{{value: float64(s.Cpu.CFS.ThrottledTime) / float64(time.Second)}}
            },
        }, {
            name:      "container_cpu_schedstat_run_seconds_total",
            help:      "Time duration the processes of the container have run on the CPU.",
            valueType: prometheus.CounterValue,
            getValues: func(s *info.ContainerStats) metricValues {
                return metricValues{{value: float64(s.Cpu.Schedstat.RunTime) / float64(time.Second)}}
            },
        }, {
            name:      "container_cpu_schedstat_runqueue_seconds_total",
            help:      "Time duration processes of the container have been waiting on a runqueue.",
            valueType: prometheus.CounterValue,
            getValues: func(s *info.ContainerStats) metricValues {
                return metricValues{{value: float64(s.Cpu.Schedstat.RunqueueTime) / float64(time.Second)}}
            },
        }, {
            name:      "container_cpu_schedstat_run_periods_total",
            help:      "Number of times processes of the cgroup have run on the cpu",
            valueType: prometheus.CounterValue,
            getValues: func(s *info.ContainerStats) metricValues {
                return metricValues{{value: float64(s.Cpu.Schedstat.RunPeriods)}}
            },
        }, {
            name: "container_cpu_load_average_10s",
            help: "Value of container cpu load average over the last 10 seconds.",
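The schedstat counters are nanoseconds, so the new collectors divide by float64(time.Second) to export seconds, the same convention the throttled-time metric above already uses. A quick standalone check of the conversion:

package main

import (
    "fmt"
    "time"
)

func main() {
    var runTimeNs uint64 = 2500000000 // 2.5s on-cpu, as the kernel reports it
    fmt.Println(float64(runTimeNs) / float64(time.Second)) // 2.5
}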
2 vendor/github.com/google/cadvisor/storage/storage.go generated vendored
@@ -22,7 +22,7 @@ import (
)

type StorageDriver interface {
    AddStats(ref info.ContainerReference, stats *info.ContainerStats) error
    AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error

    // Close will clear the state of the storage driver. The elements
    // stored in the underlying storage may or may not be deleted depending
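With the new signature a driver receives the whole *info.ContainerInfo, so it can reach the spec and labels as well as the embedded reference. A minimal sketch of a conforming driver; the stdout driver itself is illustrative, not an existing cadvisor backend.

package main

import (
    "fmt"

    info "github.com/google/cadvisor/info/v1"
)

type stdoutDriver struct{}

// AddStats satisfies the updated StorageDriver method.
func (d *stdoutDriver) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
    fmt.Printf("%s: total cpu usage %d\n", cInfo.ContainerReference.Name, stats.Cpu.Usage.Total)
    return nil
}

func (d *stdoutDriver) Close() error { return nil }

func main() {
    d := &stdoutDriver{}
    ci := &info.ContainerInfo{ContainerReference: info.ContainerReference{Name: "/test"}}
    _ = d.AddStats(ci, &info.ContainerStats{})
}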
19 vendor/github.com/google/cadvisor/validate/validate.go generated vendored
@@ -22,6 +22,7 @@ import (
    "io/ioutil"
    "log"
    "net/http"
    "os"
    "path"
    "strings"

@@ -133,6 +134,23 @@ func areCgroupsPresent(available map[string]int, desired []string) (bool, string
    return true, ""
}

func validateCpuCfsBandwidth(available_cgroups map[string]int) string {
    ok, _ := areCgroupsPresent(available_cgroups, []string{"cpu"})
    if !ok {
        return "\tCpu cfs bandwidth status unknown: cpu cgroup not enabled.\n"
    }
    mnt, err := cgroups.FindCgroupMountpoint("cpu")
    if err != nil {
        return "\tCpu cfs bandwidth status unknown: cpu cgroup not mounted.\n"
    }
    _, err = os.Stat(path.Join(mnt, "cpu.cfs_period_us"))
    if os.IsNotExist(err) {
        return "\tCpu cfs bandwidth is disabled. Recompile kernel with \"CONFIG_CFS_BANDWIDTH\" enabled.\n"
    }

    return "\tCpu cfs bandwidth is enabled.\n"
}

func validateMemoryAccounting(available_cgroups map[string]int) string {
    ok, _ := areCgroupsPresent(available_cgroups, []string{"memory"})
    if !ok {
@@ -181,6 +199,7 @@ func validateCgroups() (string, string) {
    out = fmt.Sprintf("Available cgroups: %v\n", available_cgroups)
    out += desc
    out += validateMemoryAccounting(available_cgroups)
    out += validateCpuCfsBandwidth(available_cgroups)
    return Recommended, out
}
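The new check relies on the kernel exposing cpu.cfs_period_us under the cpu cgroup mount only when CONFIG_CFS_BANDWIDTH is compiled in. A standalone sketch of that probe; the mount point is hard-coded to the common cgroup-v1 layout instead of being resolved through cgroups.FindCgroupMountpoint as the real code does.

package main

import (
    "fmt"
    "os"
    "path"
)

func main() {
    mnt := "/sys/fs/cgroup/cpu" // assumption: typical cgroup v1 mount
    if _, err := os.Stat(path.Join(mnt, "cpu.cfs_period_us")); os.IsNotExist(err) {
        fmt.Println("Cpu cfs bandwidth is disabled (CONFIG_CFS_BANDWIDTH off)")
        return
    }
    fmt.Println("Cpu cfs bandwidth is enabled")
}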