Update vendor dependencies
Change-Id: I3b1ca9f2687388c831d9d46a4e1de413ffae06ac
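The dominant change in this vendor bump is the swap of github.com/golang/glog for k8s.io/klog across the vendored cadvisor packages. Below is a minimal sketch of that substitution pattern, assuming only what the diffs that follow show (the surrounding program is illustrative, not part of this commit):

// Before: call sites imported "github.com/golang/glog" and used
//   glog.V(4).Infof(...), glog.Warningf(...), glog.Errorf(...).
// After: the same call sites import "k8s.io/klog", whose logging surface is
// API-compatible, so only the import path and identifier change.
package main

import (
    "errors"

    "k8s.io/klog"
)

func main() {
    err := errors.New("example error")
    // Verbosity levels and format verbs carry over unchanged from glog.
    klog.V(4).Infof("Could not initialize NVML: %v", err)
    klog.Warningf("example warning: %v", err)
}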

vendor/github.com/google/cadvisor/accelerators/BUILD (generated, vendored): 2 changed lines
@@ -10,9 +10,9 @@ go_library(
     importpath = "github.com/google/cadvisor/accelerators",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/mindprince/gonvml:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/accelerators/nvidia.go (generated, vendored): 20 changed lines
@@ -26,8 +26,8 @@ import (
     info "github.com/google/cadvisor/info/v1"
 
-    "github.com/golang/glog"
     "github.com/mindprince/gonvml"
+    "k8s.io/klog"
 )
 
 type NvidiaManager struct {
@@ -50,7 +50,7 @@ const nvidiaVendorId = "0x10de"
 // Setup initializes NVML if nvidia devices are present on the node.
 func (nm *NvidiaManager) Setup() {
     if !detectDevices(nvidiaVendorId) {
-        glog.V(4).Info("No NVIDIA devices found.")
+        klog.V(4).Info("No NVIDIA devices found.")
         return
     }
 
@@ -63,7 +63,7 @@ func (nm *NvidiaManager) Setup() {
 func detectDevices(vendorId string) bool {
     devices, err := ioutil.ReadDir(sysFsPCIDevicesPath)
     if err != nil {
-        glog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err)
+        klog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err)
         return false
     }
 
@@ -71,11 +71,11 @@ func detectDevices(vendorId string) bool {
         vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor")
         content, err := ioutil.ReadFile(vendorPath)
         if err != nil {
-            glog.V(4).Infof("Error while reading %q: %v", vendorPath, err)
+            klog.V(4).Infof("Error while reading %q: %v", vendorPath, err)
             continue
         }
         if strings.EqualFold(strings.TrimSpace(string(content)), vendorId) {
-            glog.V(3).Infof("Found device with vendorId %q", vendorId)
+            klog.V(3).Infof("Found device with vendorId %q", vendorId)
             return true
         }
     }
@@ -88,26 +88,26 @@ var initializeNVML = func(nm *NvidiaManager) {
     if err := gonvml.Initialize(); err != nil {
         // This is under a logging level because otherwise we may cause
         // log spam if the drivers/nvml is not installed on the system.
-        glog.V(4).Infof("Could not initialize NVML: %v", err)
+        klog.V(4).Infof("Could not initialize NVML: %v", err)
         return
     }
     nm.nvmlInitialized = true
     numDevices, err := gonvml.DeviceCount()
     if err != nil {
-        glog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err)
+        klog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err)
         return
     }
-    glog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices)
+    klog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices)
     nm.nvidiaDevices = make(map[int]gonvml.Device, numDevices)
     for i := 0; i < int(numDevices); i++ {
         device, err := gonvml.DeviceHandleByIndex(uint(i))
         if err != nil {
-            glog.Warningf("Failed to get nvidia device handle %d: %v", i, err)
+            klog.Warningf("Failed to get nvidia device handle %d: %v", i, err)
             continue
         }
         minorNumber, err := device.MinorNumber()
         if err != nil {
-            glog.Warningf("Failed to get nvidia device minor number: %v", err)
+            klog.Warningf("Failed to get nvidia device minor number: %v", err)
             continue
         }
         nm.nvidiaDevices[int(minorNumber)] = device

vendor/github.com/google/cadvisor/cache/memory/BUILD (generated, vendored): 2 changed lines
@@ -7,10 +7,10 @@ go_library(
     importpath = "github.com/google/cadvisor/cache/memory",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/storage:go_default_library",
         "//vendor/github.com/google/cadvisor/utils:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/cache/memory/memory.go (generated, vendored): 6 changed lines
@@ -23,13 +23,13 @@ import (
     "github.com/google/cadvisor/storage"
     "github.com/google/cadvisor/utils"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 // ErrDataNotFound is the error resulting if failed to find a container in memory cache.
 var ErrDataNotFound = errors.New("unable to find data in memory cache")
 
-// TODO(vmarmol): See about refactoring this class, we have an unecessary redirection of containerCache and InMemoryCache.
+// TODO(vmarmol): See about refactoring this class, we have an unnecessary redirection of containerCache and InMemoryCache.
 // containerCache is used to store per-container information
 type containerCache struct {
     ref info.ContainerReference
@@ -91,7 +91,7 @@ func (self *InMemoryCache) AddStats(cInfo *info.ContainerInfo, stats *info.Conta
         // may want to start a pool of goroutines to do write
         // operations.
         if err := self.backend.AddStats(cInfo, stats); err != nil {
-            glog.Error(err)
+            klog.Error(err)
         }
     }
     return cstore.AddStats(stats)

vendor/github.com/google/cadvisor/container/BUILD (generated, vendored): 2 changed lines
@@ -10,9 +10,9 @@ go_library(
     importpath = "github.com/google/cadvisor/container",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/common/BUILD (generated, vendored): 6 changed lines
@@ -12,12 +12,14 @@ go_library(
     importpath = "github.com/google/cadvisor/container/common",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/fs:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/utils:go_default_library",
-        "//vendor/golang.org/x/exp/inotify:go_default_library",
+        "//vendor/github.com/karrick/godirwalk:go_default_library",
+        "//vendor/github.com/pkg/errors:go_default_library",
+        "//vendor/github.com/sigma/go-inotify:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/common/fsHandler.go (generated, vendored): 6 changed lines
@@ -22,7 +22,7 @@ import (
 
     "github.com/google/cadvisor/fs"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 type FsHandler interface {
@@ -118,7 +118,7 @@ func (fh *realFsHandler) trackUsage() {
         case <-time.After(fh.period):
             start := time.Now()
             if err := fh.update(); err != nil {
-                glog.Errorf("failed to collect filesystem stats - %v", err)
+                klog.Errorf("failed to collect filesystem stats - %v", err)
                 fh.period = fh.period * 2
                 if fh.period > maxBackoffFactor*fh.minPeriod {
                     fh.period = maxBackoffFactor * fh.minPeriod
@@ -132,7 +132,7 @@ func (fh *realFsHandler) trackUsage() {
                 // if the long duration is persistent either because of slow
                 // disk or lots of containers.
                 longOp = longOp + time.Second
-                glog.V(2).Infof("du and find on following dirs took %v: %v; will not log again for this container unless duration exceeds %v", duration, []string{fh.rootfs, fh.extraDir}, longOp)
+                klog.V(2).Infof("du and find on following dirs took %v: %v; will not log again for this container unless duration exceeds %v", duration, []string{fh.rootfs, fh.extraDir}, longOp)
             }
         }
     }

vendor/github.com/google/cadvisor/container/common/helpers.go (generated, vendored): 42 changed lines
@@ -26,8 +26,10 @@ import (
     "github.com/google/cadvisor/container"
     info "github.com/google/cadvisor/info/v1"
     "github.com/google/cadvisor/utils"
+    "github.com/karrick/godirwalk"
+    "github.com/pkg/errors"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 func DebugInfo(watches map[string][]string) map[string][]string {
@@ -85,7 +87,7 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
     if quota != "" && quota != "-1" {
         val, err := strconv.ParseUint(quota, 10, 64)
         if err != nil {
-            glog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
+            klog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
         }
         spec.Cpu.Quota = val
     }
@@ -132,7 +134,7 @@ func readString(dirpath string, file string) string {
     if err != nil {
         // Ignore non-existent files
         if !os.IsNotExist(err) {
-            glog.Errorf("readString: Failed to read %q: %s", cgroupFile, err)
+            klog.Errorf("readString: Failed to read %q: %s", cgroupFile, err)
         }
         return ""
     }
@@ -147,7 +149,7 @@ func readUInt64(dirpath string, file string) uint64 {
 
     val, err := strconv.ParseUint(out, 10, 64)
     if err != nil {
-        glog.Errorf("readUInt64: Failed to parse int %q from file %q: %s", out, path.Join(dirpath, file), err)
+        klog.Errorf("readUInt64: Failed to parse int %q from file %q: %s", out, path.Join(dirpath, file), err)
         return 0
     }
 
@@ -156,26 +158,34 @@ func readUInt64(dirpath string, file string) uint64 {
 
 // Lists all directories under "path" and outputs the results as children of "parent".
 func ListDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}) error {
-    entries, err := ioutil.ReadDir(dirpath)
+    buf := make([]byte, godirwalk.DefaultScratchBufferSize)
+    return listDirectories(dirpath, parent, recursive, output, buf)
+}
+
+func listDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}, buf []byte) error {
+    dirents, err := godirwalk.ReadDirents(dirpath, buf)
     if err != nil {
         // Ignore if this hierarchy does not exist.
-        if os.IsNotExist(err) {
+        if os.IsNotExist(errors.Cause(err)) {
             err = nil
         }
         return err
     }
-    for _, entry := range entries {
+    for _, dirent := range dirents {
         // We only grab directories.
-        if entry.IsDir() {
-            name := path.Join(parent, entry.Name())
-            output[name] = struct{}{}
+        if !dirent.IsDir() {
+            continue
+        }
+        dirname := dirent.Name()
 
-            // List subcontainers if asked to.
-            if recursive {
-                err := ListDirectories(path.Join(dirpath, entry.Name()), name, true, output)
-                if err != nil {
-                    return err
-                }
+        name := path.Join(parent, dirname)
+        output[name] = struct{}{}
+
+        // List subcontainers if asked to.
+        if recursive {
+            err := listDirectories(path.Join(dirpath, dirname), name, true, output, buf)
+            if err != nil {
+                return err
             }
         }
     }
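For context on the ListDirectories rewrite above: godirwalk.ReadDirents returns directory entries without the per-entry stat that ioutil.ReadDir performs, and the caller supplies a scratch buffer that is reused across reads. A minimal sketch of that API, assuming godirwalk v1.x as vendored here; the directory path is illustrative only:

package main

import (
    "fmt"

    "github.com/karrick/godirwalk"
)

func main() {
    // One scratch buffer can be reused across many ReadDirents calls, which is
    // what the vendored listDirectories helper does on each recursion step.
    buf := make([]byte, godirwalk.DefaultScratchBufferSize)
    dirents, err := godirwalk.ReadDirents("/sys/fs/cgroup/cpu", buf) // illustrative path
    if err != nil {
        fmt.Println("read error:", err)
        return
    }
    for _, dirent := range dirents {
        if dirent.IsDir() {
            fmt.Println("child cgroup:", dirent.Name())
        }
    }
}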

vendor/github.com/google/cadvisor/container/common/inotify_watcher.go (generated, vendored): 4 changed lines
@@ -17,7 +17,7 @@ package common
 import (
     "sync"
 
-    "golang.org/x/exp/inotify"
+    inotify "github.com/sigma/go-inotify"
 )
 
 // Watcher for container-related inotify events in the cgroup hierarchy.
@@ -78,7 +78,7 @@ func (iw *InotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) {
     iw.lock.Lock()
     defer iw.lock.Unlock()
 
-    // If we don't have a watch registed for this, just return.
+    // If we don't have a watch registered for this, just return.
     cgroupsWatched, ok := iw.containersWatched[containerName]
     if !ok {
         return false, nil

vendor/github.com/google/cadvisor/container/containerd/BUILD (generated, vendored): 2 changed lines
@@ -20,7 +20,6 @@ go_library(
         "//vendor/github.com/containerd/containerd/errdefs:go_default_library",
         "//vendor/github.com/containerd/containerd/namespaces:go_default_library",
         "//vendor/github.com/gogo/protobuf/types:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/container/common:go_default_library",
         "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
@@ -32,6 +31,7 @@ go_library(
         "//vendor/github.com/opencontainers/runtime-spec/specs-go:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/containerd/factory.go (generated, vendored): 4 changed lines
@@ -21,8 +21,8 @@ import (
     "regexp"
     "strings"
 
-    "github.com/golang/glog"
     "golang.org/x/net/context"
+    "k8s.io/klog"
 
     "github.com/google/cadvisor/container"
     "github.com/google/cadvisor/container/libcontainer"
@@ -133,7 +133,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
         return fmt.Errorf("failed to get cgroup subsystems: %v", err)
     }
 
-    glog.V(1).Infof("Registering containerd factory")
+    klog.V(1).Infof("Registering containerd factory")
     f := &containerdFactory{
         cgroupSubsystems: cgroupSubsystems,
         client:           client,

vendor/github.com/google/cadvisor/container/crio/BUILD (generated, vendored): 2 changed lines
@@ -11,7 +11,6 @@ go_library(
     importpath = "github.com/google/cadvisor/container/crio",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/container/common:go_default_library",
         "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
@@ -20,6 +19,7 @@ go_library(
         "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/crio/factory.go (generated, vendored): 4 changed lines
@@ -26,7 +26,7 @@ import (
     info "github.com/google/cadvisor/info/v1"
     "github.com/google/cadvisor/manager/watcher"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 // The namespace under which crio aliases are unique.
@@ -154,7 +154,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
         return fmt.Errorf("failed to get cgroup subsystems: %v", err)
     }
 
-    glog.V(1).Infof("Registering CRI-O factory")
+    klog.V(1).Infof("Registering CRI-O factory")
     f := &crioFactory{
         client:           client,
         cgroupSubsystems: cgroupSubsystems,

vendor/github.com/google/cadvisor/container/crio/handler.go (generated, vendored): 2 changed lines
@@ -176,7 +176,7 @@ func newCrioContainerHandler(
     }
     // TODO for env vars we wanted to show from container.Config.Env from whitelist
     //for _, exposedEnv := range metadataEnvs {
-    //glog.V(4).Infof("TODO env whitelist: %v", exposedEnv)
+    //klog.V(4).Infof("TODO env whitelist: %v", exposedEnv)
     //}
 
     return handler, nil

vendor/github.com/google/cadvisor/container/docker/BUILD (generated, vendored): 2 changed lines
@@ -17,7 +17,6 @@ go_library(
         "//vendor/github.com/docker/docker/api/types/container:go_default_library",
         "//vendor/github.com/docker/docker/client:go_default_library",
         "//vendor/github.com/docker/go-connections/tlsconfig:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/container/common:go_default_library",
         "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
@@ -31,6 +30,7 @@ go_library(
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/docker/factory.go (generated, vendored): 8 changed lines
@@ -36,8 +36,8 @@ import (
     "github.com/google/cadvisor/zfs"
 
     docker "github.com/docker/docker/client"
-    "github.com/golang/glog"
     "golang.org/x/net/context"
+    "k8s.io/klog"
 )
 
 var ArgDockerEndpoint = flag.String("docker", "unix:///var/run/docker.sock", "docker endpoint")
@@ -337,7 +337,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
     if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver {
         thinPoolWatcher, err = startThinPoolWatcher(dockerInfo)
         if err != nil {
-            glog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
+            klog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
         }
 
         // Safe to ignore error - driver status should always be populated.
@@ -349,11 +349,11 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
     if storageDriver(dockerInfo.Driver) == zfsStorageDriver {
         zfsWatcher, err = startZfsWatcher(dockerInfo)
         if err != nil {
-            glog.Errorf("zfs filesystem stats will not be reported: %v", err)
+            klog.Errorf("zfs filesystem stats will not be reported: %v", err)
         }
     }
 
-    glog.V(1).Infof("Registering Docker factory")
+    klog.V(1).Infof("Registering Docker factory")
     f := &dockerFactory{
         cgroupSubsystems: cgroupSubsystems,
         client:           client,

vendor/github.com/google/cadvisor/container/docker/handler.go (generated, vendored): 8 changed lines
@@ -34,10 +34,10 @@ import (
 
     dockercontainer "github.com/docker/docker/api/types/container"
     docker "github.com/docker/docker/client"
-    "github.com/golang/glog"
     cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
     libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
     "golang.org/x/net/context"
+    "k8s.io/klog"
 )
 
 const (
@@ -228,7 +228,7 @@ func newDockerContainerHandler(
         handler.labels["restartcount"] = strconv.Itoa(ctnr.RestartCount)
     }
 
-    // Obtain the IP address for the contianer.
+    // Obtain the IP address for the container.
     // If the NetworkMode starts with 'container:' then we need to use the IP address of the container specified.
     // This happens in cases such as kubernetes where the containers doesn't have an IP address itself and we need to use the pod's address
     ipAddress := ctnr.NetworkSettings.IPAddress
@@ -309,7 +309,7 @@ func (h *dockerFsHandler) Usage() common.FsUsage {
             // TODO: ideally we should keep track of how many times we failed to get the usage for this
             // device vs how many refreshes of the cache there have been, and display an error e.g. if we've
             // had at least 1 refresh and we still can't find the device.
-            glog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err)
+            klog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err)
         } else {
             usage.BaseUsageBytes = thinPoolUsage
             usage.TotalUsageBytes += thinPoolUsage
@@ -319,7 +319,7 @@ func (h *dockerFsHandler) Usage() common.FsUsage {
     if h.zfsWatcher != nil {
         zfsUsage, err := h.zfsWatcher.GetUsage(h.zfsFilesystem)
         if err != nil {
-            glog.V(5).Infof("unable to get fs usage from zfs for filesystem %s: %v", h.zfsFilesystem, err)
+            klog.V(5).Infof("unable to get fs usage from zfs for filesystem %s: %v", h.zfsFilesystem, err)
         } else {
             usage.BaseUsageBytes = zfsUsage
             usage.TotalUsageBytes += zfsUsage

vendor/github.com/google/cadvisor/container/factory.go (generated, vendored): 11 changed lines
@@ -20,7 +20,7 @@ import (
 
     "github.com/google/cadvisor/manager/watcher"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 type ContainerHandlerFactory interface {
@@ -53,6 +53,7 @@ const (
     NetworkUdpUsageMetrics MetricKind = "udp"
     AcceleratorUsageMetrics MetricKind = "accelerator"
     AppMetrics MetricKind = "app"
+    ProcessMetrics MetricKind = "process"
 )
 
 func (mk MetricKind) String() string {
@@ -105,18 +106,18 @@ func NewContainerHandler(name string, watchType watcher.ContainerWatchSource, in
     for _, factory := range factories[watchType] {
         canHandle, canAccept, err := factory.CanHandleAndAccept(name)
         if err != nil {
-            glog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err)
+            klog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err)
         }
         if canHandle {
             if !canAccept {
-                glog.V(3).Infof("Factory %q can handle container %q, but ignoring.", factory, name)
+                klog.V(3).Infof("Factory %q can handle container %q, but ignoring.", factory, name)
                 return nil, false, nil
             }
-            glog.V(3).Infof("Using factory %q for container %q", factory, name)
+            klog.V(3).Infof("Using factory %q for container %q", factory, name)
             handle, err := factory.NewContainerHandler(name, inHostNamespace)
             return handle, canAccept, err
         } else {
-            glog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
+            klog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
         }
     }
 

vendor/github.com/google/cadvisor/container/libcontainer/BUILD (generated, vendored): 2 changed lines
@@ -11,11 +11,11 @@ go_library(
     importpath = "github.com/google/cadvisor/container/libcontainer",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/libcontainer/handler.go (generated, vendored): 67 changed lines
@@ -29,9 +29,9 @@ import (
     info "github.com/google/cadvisor/info/v1"
 
     "bytes"
-    "github.com/golang/glog"
     "github.com/opencontainers/runc/libcontainer"
     "github.com/opencontainers/runc/libcontainer/cgroups"
+    "k8s.io/klog"
 )
 
 /*
@@ -72,11 +72,11 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
     if h.includedMetrics.Has(container.ProcessSchedulerMetrics) {
         pids, err := h.cgroupManager.GetAllPids()
         if err != nil {
-            glog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
+            klog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
         } else {
             stats.Cpu.Schedstat, err = schedulerStatsFromProcs(h.rootFs, pids, h.pidMetricsCache)
             if err != nil {
-                glog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
+                klog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
             }
         }
     }
@@ -88,7 +88,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
     if h.includedMetrics.Has(container.NetworkUsageMetrics) {
         netStats, err := networkStatsFromProc(h.rootFs, h.pid)
         if err != nil {
-            glog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
+            klog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
         } else {
             stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
         }
@@ -96,14 +96,14 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
     if h.includedMetrics.Has(container.NetworkTcpUsageMetrics) {
         t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
         if err != nil {
-            glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
+            klog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
         } else {
             stats.Network.Tcp = t
         }
 
         t6, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp6")
         if err != nil {
-            glog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err)
+            klog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err)
         } else {
             stats.Network.Tcp6 = t6
         }
@@ -111,18 +111,30 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
     if h.includedMetrics.Has(container.NetworkUdpUsageMetrics) {
         u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
         if err != nil {
-            glog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
+            klog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
         } else {
             stats.Network.Udp = u
         }
 
         u6, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp6")
         if err != nil {
-            glog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err)
+            klog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err)
         } else {
             stats.Network.Udp6 = u6
         }
     }
+    if h.includedMetrics.Has(container.ProcessMetrics) {
+        paths := h.cgroupManager.GetPaths()
+        path, ok := paths["cpu"]
+        if !ok {
+            klog.V(4).Infof("Could not find cgroups CPU for container %d", h.pid)
+        } else {
+            stats.Processes, err = processStatsFromProcs(h.rootFs, path)
+            if err != nil {
+                klog.V(4).Infof("Unable to get Process Stats: %v", err)
+            }
+        }
+    }
 
     // For backwards compatibility.
     if len(stats.Network.Interfaces) > 0 {
@@ -132,6 +144,41 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
     return stats, nil
 }
 
+func processStatsFromProcs(rootFs string, cgroupPath string) (info.ProcessStats, error) {
+    var fdCount uint64
+    filePath := path.Join(cgroupPath, "cgroup.procs")
+    out, err := ioutil.ReadFile(filePath)
+    if err != nil {
+        return info.ProcessStats{}, fmt.Errorf("couldn't open cpu cgroup procs file %v : %v", filePath, err)
+    }
+
+    pids := strings.Split(string(out), "\n")
+
+    // EOL is also treated as a new line while reading "cgroup.procs" file with ioutil.ReadFile.
+    // The last value is an empty string "". Ex: pids = ["22", "1223", ""]
+    // Trim the last value
+    if len(pids) != 0 && pids[len(pids)-1] == "" {
+        pids = pids[:len(pids)-1]
+    }
+
+    for _, pid := range pids {
+        dirPath := path.Join(rootFs, "/proc", pid, "fd")
+        fds, err := ioutil.ReadDir(dirPath)
+        if err != nil {
+            klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
+            continue
+        }
+        fdCount += uint64(len(fds))
+    }
+
+    processStats := info.ProcessStats{
+        ProcessCount: uint64(len(pids)),
+        FdCount:      fdCount,
+    }
+
+    return processStats, nil
+}
+
 func schedulerStatsFromProcs(rootFs string, pids []int, pidMetricsCache map[int]*info.CpuSchedstat) (info.CpuSchedstat, error) {
     for _, pid := range pids {
         f, err := os.Open(path.Join(rootFs, "proc", strconv.Itoa(pid), "schedstat"))
@@ -451,13 +498,13 @@ func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
     // We intentionally ignore these extra zeroes.
     numActual, err := numCpusFunc()
     if err != nil {
-        glog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
+        klog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
         numActual = numPossible
     }
     if numActual > numPossible {
         // The real number of cores should never be greater than the number of
         // datapoints reported in cpu usage.
-        glog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
+        klog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
     }
     numActual = minUint32(numPossible, numActual)
     ret.Cpu.Usage.PerCpu = make([]uint64, numActual)
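The new processStatsFromProcs helper above notes that splitting the contents of cgroup.procs on "\n" leaves a trailing empty element, because the file ends with a newline. A minimal standalone sketch of that behaviour and the trim it motivates (the PID values are illustrative):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // ioutil.ReadFile returns the file verbatim, including the final newline,
    // so the last element after Split is "": ["22" "1223" ""].
    out := "22\n1223\n"
    pids := strings.Split(out, "\n")
    fmt.Printf("%q\n", pids)

    // Trim the trailing empty element before counting processes.
    if len(pids) != 0 && pids[len(pids)-1] == "" {
        pids = pids[:len(pids)-1]
    }
    fmt.Println("process count:", len(pids))
}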

vendor/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored): 4 changed lines
@@ -19,8 +19,8 @@ import (
 
     info "github.com/google/cadvisor/info/v1"
 
-    "github.com/golang/glog"
     "github.com/opencontainers/runc/libcontainer/cgroups"
+    "k8s.io/klog"
 )
 
 type CgroupSubsystems struct {
@@ -61,7 +61,7 @@ func getCgroupSubsystemsHelper(allCgroups []cgroups.Mount) (CgroupSubsystems, er
         }
         if _, ok := mountPoints[subsystem]; ok {
             // duplicate mount for this subsystem; use the first one we saw
-            glog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem])
+            klog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem])
             continue
         }
         if _, ok := recordedMountpoints[mount.Mountpoint]; !ok {

vendor/github.com/google/cadvisor/container/mesos/BUILD (generated, vendored): 2 changed lines
@@ -14,7 +14,6 @@ go_library(
     deps = [
         "//vendor/github.com/Rican7/retry:go_default_library",
         "//vendor/github.com/Rican7/retry/strategy:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/container/common:go_default_library",
         "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
@@ -29,6 +28,7 @@ go_library(
         "//vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/mesos/client.go (generated, vendored): 23 changed lines
@@ -70,6 +70,11 @@ func Client() (mesosAgentClient, error) {
             ),
         }
     })
+
+    _, err := mesosClient.getVersion()
+    if err != nil {
+        return nil, fmt.Errorf("failed to get version")
+    }
     return mesosClient, nil
 }
 
@@ -134,6 +139,20 @@ func (self *client) getContainer(id string) (*mContainer, error) {
     return nil, fmt.Errorf("can't locate container %s", id)
 }
 
+func (self *client) getVersion() (string, error) {
+    req := calls.NonStreaming(calls.GetVersion())
+    result, err := self.fetchAndDecode(req)
+    if err != nil {
+        return "", fmt.Errorf("failed to get mesos version: %v", err)
+    }
+    version := result.GetVersion
+
+    if version == nil {
+        return "", fmt.Errorf("failed to get mesos version")
+    }
+    return version.VersionInfo.Version, nil
+}
+
 func (self *client) getContainers() (mContainers, error) {
     req := calls.NonStreaming(calls.GetContainers())
     result, err := self.fetchAndDecode(req)
@@ -141,6 +160,10 @@ func (self *client) getContainers() (mContainers, error) {
         return nil, fmt.Errorf("failed to get mesos containers: %v", err)
     }
     cntrs := result.GetContainers
+
+    if cntrs == nil {
+        return nil, fmt.Errorf("failed to get mesos containers")
+    }
     return cntrs, nil
 }
 

vendor/github.com/google/cadvisor/container/mesos/factory.go (generated, vendored): 4 changed lines
@@ -22,12 +22,12 @@ import (
     "strings"
     "time"
 
-    "github.com/golang/glog"
     "github.com/google/cadvisor/container"
     "github.com/google/cadvisor/container/libcontainer"
     "github.com/google/cadvisor/fs"
     info "github.com/google/cadvisor/info/v1"
     "github.com/google/cadvisor/manager/watcher"
+    "k8s.io/klog"
 )
 
 var MesosAgentAddress = flag.String("mesos_agent", "127.0.0.1:5051", "Mesos agent address")
@@ -135,7 +135,7 @@ func Register(
         return fmt.Errorf("failed to get cgroup subsystems: %v", err)
     }
 
-    glog.V(1).Infof("Registering mesos factory")
+    klog.V(1).Infof("Registering mesos factory")
     factory := &mesosFactory{
         machineInfoFactory: machineInfoFactory,
         cgroupSubsystems:   cgroupSubsystems,

vendor/github.com/google/cadvisor/container/raw/BUILD (generated, vendored): 2 changed lines
@@ -10,7 +10,6 @@ go_library(
     importpath = "github.com/google/cadvisor/container/raw",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/container/common:go_default_library",
         "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
@@ -20,6 +19,7 @@ go_library(
         "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/raw/factory.go (generated, vendored): 4 changed lines
@@ -26,7 +26,7 @@ import (
     info "github.com/google/cadvisor/info/v1"
     watch "github.com/google/cadvisor/manager/watcher"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 var dockerOnly = flag.Bool("docker_only", false, "Only report docker containers in addition to root stats")
@@ -94,7 +94,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, incl
         return err
     }
 
-    glog.V(1).Infof("Registering Raw factory")
+    klog.V(1).Infof("Registering Raw factory")
     factory := &rawFactory{
         machineInfoFactory: machineInfoFactory,
         fsInfo:             fsInfo,

vendor/github.com/google/cadvisor/container/raw/handler.go (generated, vendored): 6 changed lines
@@ -25,9 +25,9 @@ import (
     info "github.com/google/cadvisor/info/v1"
     "github.com/google/cadvisor/machine"
 
-    "github.com/golang/glog"
     cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
     "github.com/opencontainers/runc/libcontainer/configs"
+    "k8s.io/klog"
 )
 
 type rawContainerHandler struct {
@@ -134,7 +134,7 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
         // Get memory and swap limits of the running machine
         memLimit, err := machine.GetMachineMemoryCapacity()
         if err != nil {
-            glog.Warningf("failed to obtain memory limit for machine container")
+            klog.Warningf("failed to obtain memory limit for machine container")
             spec.HasMemory = false
         } else {
             spec.Memory.Limit = uint64(memLimit)
@@ -144,7 +144,7 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
 
         swapLimit, err := machine.GetMachineSwapCapacity()
         if err != nil {
-            glog.Warningf("failed to obtain swap limit for machine container")
+            klog.Warningf("failed to obtain swap limit for machine container")
         } else {
             spec.Memory.SwapLimit = uint64(swapLimit)
         }

vendor/github.com/google/cadvisor/container/rkt/BUILD (generated, vendored): 2 changed lines
@@ -14,7 +14,6 @@ go_library(
     deps = [
         "//vendor/github.com/blang/semver:go_default_library",
         "//vendor/github.com/coreos/rkt/api/v1alpha:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/container/common:go_default_library",
         "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
@@ -25,6 +24,7 @@ go_library(
         "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/rkt/factory.go (generated, vendored): 4 changed lines
@@ -23,7 +23,7 @@ import (
     info "github.com/google/cadvisor/info/v1"
     "github.com/google/cadvisor/manager/watcher"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 const RktNamespace = "rkt"
@@ -86,7 +86,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, incl
         return fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
     }
 
-    glog.V(1).Infof("Registering Rkt factory")
+    klog.V(1).Infof("Registering Rkt factory")
     factory := &rktFactory{
         machineInfoFactory: machineInfoFactory,
         fsInfo:             fsInfo,

vendor/github.com/google/cadvisor/container/rkt/handler.go (generated, vendored): 4 changed lines
@@ -27,9 +27,9 @@ import (
     info "github.com/google/cadvisor/info/v1"
     "golang.org/x/net/context"
 
-    "github.com/golang/glog"
     cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
     "github.com/opencontainers/runc/libcontainer/configs"
+    "k8s.io/klog"
 )
 
 type rktContainerHandler struct {
@@ -89,7 +89,7 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
     annotations := resp.Pod.Annotations
     if parsed.Container != "" { // As not empty string, an App container
         if contAnnotations, ok := findAnnotations(resp.Pod.Apps, parsed.Container); !ok {
-            glog.Warningf("couldn't find app %v in pod", parsed.Container)
+            klog.Warningf("couldn't find app %v in pod", parsed.Container)
         } else {
             annotations = append(annotations, contAnnotations...)
         }

vendor/github.com/google/cadvisor/container/rkt/helpers.go (generated, vendored): 4 changed lines
@@ -21,8 +21,8 @@ import (
     "strings"
 
     rktapi "github.com/coreos/rkt/api/v1alpha"
-    "github.com/golang/glog"
     "golang.org/x/net/context"
+    "k8s.io/klog"
 )
 
 type parsedName struct {
@@ -128,7 +128,7 @@ func getRootFs(root string, parsed *parsedName) string {
 
     bytes, err := ioutil.ReadFile(tree)
     if err != nil {
-        glog.Errorf("ReadFile failed, couldn't read %v to get upper dir: %v", tree, err)
+        klog.Errorf("ReadFile failed, couldn't read %v to get upper dir: %v", tree, err)
         return ""
     }
 

vendor/github.com/google/cadvisor/container/systemd/BUILD (generated, vendored): 2 changed lines
@@ -7,11 +7,11 @@ go_library(
     importpath = "github.com/google/cadvisor/container/systemd",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
         "//vendor/github.com/google/cadvisor/fs:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/container/systemd/factory.go (generated, vendored): 4 changed lines
@@ -23,7 +23,7 @@ import (
     info "github.com/google/cadvisor/info/v1"
     "github.com/google/cadvisor/manager/watcher"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 type systemdFactory struct{}
@@ -51,7 +51,7 @@ func (f *systemdFactory) DebugInfo() map[string][]string {
 
 // Register registers the systemd container factory.
 func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
-    glog.V(1).Infof("Registering systemd factory")
+    klog.V(1).Infof("Registering systemd factory")
     factory := &systemdFactory{}
     container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw})
     return nil

vendor/github.com/google/cadvisor/devicemapper/BUILD (generated, vendored): 2 changed lines
@@ -12,7 +12,7 @@ go_library(
     importmap = "k8s.io/kubernetes/vendor/github.com/google/cadvisor/devicemapper",
     importpath = "github.com/google/cadvisor/devicemapper",
     visibility = ["//visibility:public"],
-    deps = ["//vendor/github.com/golang/glog:go_default_library"],
+    deps = ["//vendor/k8s.io/klog:go_default_library"],
 )
 
 filegroup(

vendor/github.com/google/cadvisor/devicemapper/dmsetup_client.go (generated, vendored): 4 changed lines
@@ -18,7 +18,7 @@ import (
     "strconv"
     "strings"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 // DmsetupClient is a low-level client for interacting with device mapper via
@@ -58,6 +58,6 @@ func (c *defaultDmsetupClient) Status(deviceName string) ([]byte, error) {
 }
 
 func (*defaultDmsetupClient) dmsetup(args ...string) ([]byte, error) {
-    glog.V(5).Infof("running dmsetup %v", strings.Join(args, " "))
+    klog.V(5).Infof("running dmsetup %v", strings.Join(args, " "))
     return exec.Command("dmsetup", args...).Output()
 }

vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go (generated, vendored): 6 changed lines
@@ -21,7 +21,7 @@ import (
     "strconv"
     "strings"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 // thinLsClient knows how to run a thin_ls very specific to CoW usage for
@@ -53,7 +53,7 @@ var _ thinLsClient = &defaultThinLsClient{}
 
 func (c *defaultThinLsClient) ThinLs(deviceName string) (map[string]uint64, error) {
     args := []string{"--no-headers", "-m", "-o", "DEV,EXCLUSIVE_BYTES", deviceName}
-    glog.V(4).Infof("running command: thin_ls %v", strings.Join(args, " "))
+    klog.V(4).Infof("running command: thin_ls %v", strings.Join(args, " "))
 
     output, err := exec.Command(c.thinLsPath, args...).Output()
     if err != nil {
@@ -80,7 +80,7 @@ func parseThinLsOutput(output []byte) map[string]uint64 {
         deviceID := fields[0]
         usage, err := strconv.ParseUint(fields[1], 10, 64)
         if err != nil {
-            glog.Warningf("unexpected error parsing thin_ls output: %v", err)
+            klog.Warningf("unexpected error parsing thin_ls output: %v", err)
             continue
         }
 

vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go (generated, vendored): 20 changed lines
@@ -19,7 +19,7 @@ import (
     "sync"
     "time"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 // ThinPoolWatcher maintains a cache of device name -> usage stats for a
@@ -58,7 +58,7 @@ func NewThinPoolWatcher(poolName, metadataDevice string) (*ThinPoolWatcher, erro
 func (w *ThinPoolWatcher) Start() {
     err := w.Refresh()
     if err != nil {
-        glog.Errorf("encountered error refreshing thin pool watcher: %v", err)
+        klog.Errorf("encountered error refreshing thin pool watcher: %v", err)
     }
 
     for {
@@ -69,12 +69,12 @@ func (w *ThinPoolWatcher) Start() {
             start := time.Now()
             err = w.Refresh()
             if err != nil {
-                glog.Errorf("encountered error refreshing thin pool watcher: %v", err)
+                klog.Errorf("encountered error refreshing thin pool watcher: %v", err)
             }
 
             // print latency for refresh
             duration := time.Since(start)
-            glog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration)
+            klog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration)
         }
     }
 }
@@ -115,7 +115,7 @@ func (w *ThinPoolWatcher) Refresh() error {
     }
 
     if currentlyReserved {
-        glog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName)
+        klog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName)
         _, err = w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
         if err != nil {
             err = fmt.Errorf("error releasing metadata snapshot for %v: %v", w.poolName, err)
@@ -123,22 +123,22 @@ func (w *ThinPoolWatcher) Refresh() error {
         }
     }
 
-    glog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
+    klog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
     // NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup
     // message'.  It's not needed for thin pools.
     if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil {
         err = fmt.Errorf("error reserving metadata for thin-pool %v: %v output: %v", w.poolName, err, string(output))
         return err
     } else {
-        glog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName)
+        klog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName)
     }
 
     defer func() {
-        glog.V(5).Infof("releasing metadata snapshot for thin-pool %v", w.poolName)
+        klog.V(5).Infof("releasing metadata snapshot for thin-pool %v", w.poolName)
         w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
     }()
 
-    glog.V(5).Infof("running thin_ls on metadata device %v", w.metadataDevice)
+    klog.V(5).Infof("running thin_ls on metadata device %v", w.metadataDevice)
     newCache, err := w.thinLsClient.ThinLs(w.metadataDevice)
     if err != nil {
         err = fmt.Errorf("error performing thin_ls on metadata device %v: %v", w.metadataDevice, err)
@@ -157,7 +157,7 @@ const (
 // checkReservation checks to see whether the thin device is currently holding
 // userspace metadata.
 func (w *ThinPoolWatcher) checkReservation(poolName string) (bool, error) {
-    glog.V(5).Infof("checking whether the thin-pool is holding a metadata snapshot")
+    klog.V(5).Infof("checking whether the thin-pool is holding a metadata snapshot")
     output, err := w.dmsetup.Status(poolName)
     if err != nil {
         return false, err

vendor/github.com/google/cadvisor/events/BUILD (generated, vendored): 2 changed lines
@@ -7,9 +7,9 @@ go_library(
     importpath = "github.com/google/cadvisor/events",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/utils:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

vendor/github.com/google/cadvisor/events/handler.go (generated, vendored): 6 changed lines
@@ -24,7 +24,7 @@ import (
     info "github.com/google/cadvisor/info/v1"
     "github.com/google/cadvisor/utils"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
 type byTimestamp []*info.Event
@@ -322,7 +322,7 @@ func (self *events) AddEvent(e *info.Event) error {
     for _, watchObject := range watchesToSend {
         watchObject.eventChannel.GetChannel() <- e
     }
-    glog.V(4).Infof("Added event %v", e)
+    klog.V(4).Infof("Added event %v", e)
     return nil
 }
 
@@ -332,7 +332,7 @@ func (self *events) StopWatch(watchId int) {
     defer self.watcherLock.Unlock()
     _, ok := self.watchers[watchId]
     if !ok {
-        glog.Errorf("Could not find watcher instance %v", watchId)
+        klog.Errorf("Could not find watcher instance %v", watchId)
     }
     close(self.watchers[watchId].eventChannel.GetChannel())
     delete(self.watchers, watchId)

vendor/github.com/google/cadvisor/fs/BUILD (generated, vendored): 2 changed lines
@@ -12,11 +12,11 @@ go_library(
     deps = select({
         "@io_bazel_rules_go//go/platform:linux": [
             "//vendor/github.com/docker/docker/pkg/mount:go_default_library",
-            "//vendor/github.com/golang/glog:go_default_library",
             "//vendor/github.com/google/cadvisor/devicemapper:go_default_library",
             "//vendor/github.com/google/cadvisor/utils:go_default_library",
            "//vendor/github.com/google/cadvisor/utils/docker:go_default_library",
             "//vendor/github.com/mistifyio/go-zfs:go_default_library",
+            "//vendor/k8s.io/klog:go_default_library",
         ],
         "//conditions:default": [],
     }),
38
vendor/github.com/google/cadvisor/fs/fs.go
generated
vendored
38
vendor/github.com/google/cadvisor/fs/fs.go
generated
vendored
@@ -33,11 +33,11 @@ import (
"time"

"github.com/docker/docker/pkg/mount"
"github.com/golang/glog"
"github.com/google/cadvisor/devicemapper"
"github.com/google/cadvisor/utils"
dockerutil "github.com/google/cadvisor/utils/docker"
zfs "github.com/mistifyio/go-zfs"
"k8s.io/klog"
)

const (
@@ -114,9 +114,9 @@ func NewFsInfo(context Context) (FsInfo, error) {

fsUUIDToDeviceName, err := getFsUUIDToDeviceNameMap()
if err != nil {
// UUID is not always avaiable across different OS distributions.
// UUID is not always available across different OS distributions.
// Do not fail if there is an error.
glog.Warningf("Failed to get disk UUID mapping, getting disk info by uuid will not work: %v", err)
klog.Warningf("Failed to get disk UUID mapping, getting disk info by uuid will not work: %v", err)
}

// Avoid devicemapper container mounts - these are tracked by the ThinPoolWatcher
@@ -139,8 +139,8 @@ func NewFsInfo(context Context) (FsInfo, error) {
fsInfo.addDockerImagesLabel(context, mounts)
fsInfo.addCrioImagesLabel(context, mounts)

glog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName)
glog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions)
klog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName)
klog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions)
fsInfo.addSystemRootLabel(mounts)
return fsInfo, nil
}
@@ -165,7 +165,7 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) {
path := filepath.Join(dir, file.Name())
target, err := os.Readlink(path)
if err != nil {
glog.Warningf("Failed to resolve symlink for %q", path)
klog.Warningf("Failed to resolve symlink for %q", path)
continue
}
device, err := filepath.Abs(filepath.Join(dir, target))
@@ -213,7 +213,7 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma
if mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
major, minor, err := getBtrfsMajorMinorIds(mount)
if err != nil {
glog.Warningf("%s", err)
klog.Warningf("%s", err)
} else {
mount.Major = major
mount.Minor = minor
@@ -278,7 +278,7 @@ func (self *RealFsInfo) addSystemRootLabel(mounts []*mount.Info) {
func (self *RealFsInfo) addDockerImagesLabel(context Context, mounts []*mount.Info) {
dockerDev, dockerPartition, err := self.getDockerDeviceMapperInfo(context.Docker)
if err != nil {
glog.Warningf("Could not get Docker devicemapper device: %v", err)
klog.Warningf("Could not get Docker devicemapper device: %v", err)
}
if len(dockerDev) > 0 && dockerPartition != nil {
self.partitions[dockerDev] = *dockerPartition
@@ -405,7 +405,7 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
switch partition.fsType {
case DeviceMapper.String():
fs.Capacity, fs.Free, fs.Available, err = getDMStats(device, partition.blockSize)
glog.V(5).Infof("got devicemapper fs capacity stats: capacity: %v free: %v available: %v:", fs.Capacity, fs.Free, fs.Available)
klog.V(5).Infof("got devicemapper fs capacity stats: capacity: %v free: %v available: %v:", fs.Capacity, fs.Free, fs.Available)
fs.Type = DeviceMapper
case ZFS.String():
fs.Capacity, fs.Free, fs.Available, err = getZfstats(device)
@@ -418,11 +418,11 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
fs.InodesFree = &inodesFree
fs.Type = VFS
} else {
glog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint)
klog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint)
}
}
if err != nil {
glog.V(4).Infof("Stat fs failed. Error: %v", err)
klog.V(4).Infof("Stat fs failed. Error: %v", err)
} else {
deviceSet[device] = struct{}{}
fs.DeviceInfo = DeviceInfo{
@@ -445,7 +445,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
file, err := os.Open(diskStatsFile)
if err != nil {
if os.IsNotExist(err) {
glog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile)
klog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile)
return diskStatsMap, nil
}
return nil, err
@@ -551,7 +551,7 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
if found && mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
major, minor, err := getBtrfsMajorMinorIds(mount)
if err != nil {
glog.Warningf("%s", err)
klog.Warningf("%s", err)
} else {
return &DeviceInfo{mount.Source, uint(major), uint(minor)}, nil
}
@@ -583,12 +583,12 @@ func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
return 0, fmt.Errorf("failed to exec du - %v", err)
}
timer := time.AfterFunc(timeout, func() {
glog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
klog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
cmd.Process.Kill()
})
stdoutb, souterr := ioutil.ReadAll(stdoutp)
if souterr != nil {
glog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr)
klog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr)
}
stderrb, _ := ioutil.ReadAll(stderrp)
err = cmd.Wait()
@@ -622,7 +622,7 @@ func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
}
timer := time.AfterFunc(timeout, func() {
glog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String())
klog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String())
findCmd.Process.Kill()
})
err := findCmd.Wait()
@@ -763,7 +763,7 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
return 0, 0, err
}

glog.V(4).Infof("btrfs mount %#v", mount)
klog.V(4).Infof("btrfs mount %#v", mount)
if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK {
err := syscall.Stat(mount.Mountpoint, buf)
if err != nil {
@@ -771,8 +771,8 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
return 0, 0, err
}

glog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev)))
glog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev)))
klog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev)))
klog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev)))

return int(major(buf.Dev)), int(minor(buf.Dev)), nil
} else {
15
vendor/github.com/google/cadvisor/info/v1/container.go
generated
vendored
15
vendor/github.com/google/cadvisor/info/v1/container.go
generated
vendored
@@ -102,11 +102,11 @@ type ContainerInfoRequest struct {
NumStats int `json:"num_stats,omitempty"`

// Start time for which to query information.
// If ommitted, the beginning of time is assumed.
// If omitted, the beginning of time is assumed.
Start time.Time `json:"start,omitempty"`

// End time for which to query information.
// If ommitted, current time is assumed.
// If omitted, current time is assumed.
End time.Time `json:"end,omitempty"`
}

@@ -557,6 +557,14 @@ type AcceleratorStats struct {
DutyCycle uint64 `json:"duty_cycle"`
}

type ProcessStats struct {
// Number of processes
ProcessCount uint64 `json:"process_count"`

// Number of open file descriptors
FdCount uint64 `json:"fd_count"`
}

type ContainerStats struct {
// The time of this stat point.
Timestamp time.Time `json:"timestamp"`
@@ -574,6 +582,9 @@ type ContainerStats struct {
// Metrics for Accelerators. Each Accelerator corresponds to one element in the array.
Accelerators []AcceleratorStats `json:"accelerators,omitempty"`

// ProcessStats for Containers
Processes ProcessStats `json:"processes,omitempty"`

// Custom metrics from all collectors
CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"`
}
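Note on the hunk above: the new ProcessStats type rides along in each ContainerStats sample under the `processes` key. A minimal, self-contained sketch of how it marshals (not part of the vendored code; the numbers are invented):

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the ProcessStats struct introduced in info/v1/container.go.
type ProcessStats struct {
	ProcessCount uint64 `json:"process_count"`
	FdCount      uint64 `json:"fd_count"`
}

func main() {
	b, _ := json.Marshal(ProcessStats{ProcessCount: 12, FdCount: 97})
	fmt.Println(string(b)) // {"process_count":12,"fd_count":97}
}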
2
vendor/github.com/google/cadvisor/info/v2/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/info/v2/BUILD
generated
vendored
@@ -11,8 +11,8 @@ go_library(
importpath = "github.com/google/cadvisor/info/v2",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
3
vendor/github.com/google/cadvisor/info/v2/container.go
generated
vendored
3
vendor/github.com/google/cadvisor/info/v2/container.go
generated
vendored
@@ -38,7 +38,7 @@ type CpuSpec struct {
Mask string `json:"mask,omitempty"`
// CPUQuota Default is disabled
Quota uint64 `json:"quota,omitempty"`
// Period is the CPU reference time in ns e.g the quota is compared aginst this.
// Period is the CPU reference time in ns e.g the quota is compared against this.
Period uint64 `json:"period,omitempty"`
}

@@ -254,6 +254,7 @@ type ProcessInfo struct {
RunningTime string `json:"running_time"`
CgroupPath string `json:"cgroup_path"`
Cmd string `json:"cmd"`
FdCount int `json:"fd_count"`
}

type TcpStat struct {
10
vendor/github.com/google/cadvisor/info/v2/conversion.go
generated
vendored
10
vendor/github.com/google/cadvisor/info/v2/conversion.go
generated
vendored
@@ -18,8 +18,8 @@ import (
"fmt"
"time"

"github.com/golang/glog"
"github.com/google/cadvisor/info/v1"
"k8s.io/klog"
)

func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
@@ -70,7 +70,7 @@ func MachineStatsFromV1(cont *v1.ContainerInfo) []MachineStats {
stat.Cpu = &val.Cpu
cpuInst, err := InstCpuStats(last, val)
if err != nil {
glog.Warningf("Could not get instant cpu stats: %v", err)
klog.Warningf("Could not get instant cpu stats: %v", err)
} else {
stat.CpuInst = cpuInst
}
@@ -107,7 +107,7 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []
stat.Cpu = &val.Cpu
cpuInst, err := InstCpuStats(last, val)
if err != nil {
glog.Warningf("Could not get instant cpu stats: %v", err)
klog.Warningf("Could not get instant cpu stats: %v", err)
} else {
stat.CpuInst = cpuInst
}
@@ -133,7 +133,7 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []
}
} else if len(val.Filesystem) > 1 && containerName != "/" {
// Cannot handle multiple devices per container.
glog.V(4).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
klog.V(4).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
}
}
if spec.HasDiskIo {
@@ -168,7 +168,7 @@ func DeprecatedStatsFromV1(cont *v1.ContainerInfo) []DeprecatedContainerStats {
stat.Cpu = val.Cpu
cpuInst, err := InstCpuStats(last, val)
if err != nil {
glog.Warningf("Could not get instant cpu stats: %v", err)
klog.Warningf("Could not get instant cpu stats: %v", err)
} else {
stat.CpuInst = cpuInst
}
2
vendor/github.com/google/cadvisor/machine/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/machine/BUILD
generated
vendored
@@ -11,7 +11,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/docker/docker/pkg/parsers/operatingsystem:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/fs:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/google/cadvisor/utils:go_default_library",
@@ -19,6 +18,7 @@ go_library(
"//vendor/github.com/google/cadvisor/utils/sysfs:go_default_library",
"//vendor/github.com/google/cadvisor/utils/sysinfo:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
14
vendor/github.com/google/cadvisor/machine/info.go
generated
vendored
14
vendor/github.com/google/cadvisor/machine/info.go
generated
vendored
@@ -30,7 +30,7 @@ import (
"github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/utils/sysinfo"

"github.com/golang/glog"
"k8s.io/klog"

"golang.org/x/sys/unix"
)
@@ -50,7 +50,7 @@ func getInfoFromFiles(filePaths string) string {
return strings.TrimSpace(string(id))
}
}
glog.Warningf("Couldn't collect info from any of the files in %q", filePaths)
klog.Warningf("Couldn't collect info from any of the files in %q", filePaths)
return ""
}

@@ -117,27 +117,27 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach

filesystems, err := fsInfo.GetGlobalFsInfo()
if err != nil {
glog.Errorf("Failed to get global filesystem information: %v", err)
klog.Errorf("Failed to get global filesystem information: %v", err)
}

diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs)
if err != nil {
glog.Errorf("Failed to get disk map: %v", err)
klog.Errorf("Failed to get disk map: %v", err)
}

netDevices, err := sysinfo.GetNetworkDevices(sysFs)
if err != nil {
glog.Errorf("Failed to get network devices: %v", err)
klog.Errorf("Failed to get network devices: %v", err)
}

topology, numCores, err := GetTopology(sysFs, string(cpuinfo))
if err != nil {
glog.Errorf("Failed to get topology information: %v", err)
klog.Errorf("Failed to get topology information: %v", err)
}

systemUUID, err := sysinfo.GetSystemUUID(sysFs)
if err != nil {
glog.Errorf("Failed to get system UUID: %v", err)
klog.Errorf("Failed to get system UUID: %v", err)
}

realCloudInfo := cloudinfo.NewRealCloudInfo()
4
vendor/github.com/google/cadvisor/machine/machine.go
generated
vendored
4
vendor/github.com/google/cadvisor/machine/machine.go
generated
vendored
@@ -30,7 +30,7 @@ import (
"github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/utils/sysinfo"

"github.com/golang/glog"
"k8s.io/klog"

"golang.org/x/sys/unix"
)
@@ -191,7 +191,7 @@ func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
for idx, node := range nodes {
caches, err := sysinfo.GetCacheInfo(sysFs, node.Cores[0].Threads[0])
if err != nil {
glog.Errorf("failed to get cache information for node %d: %v", node.Id, err)
klog.Errorf("failed to get cache information for node %d: %v", node.Id, err)
continue
}
numThreadsPerCore := len(node.Cores[0].Threads)
2
vendor/github.com/google/cadvisor/manager/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/manager/BUILD
generated
vendored
@@ -11,7 +11,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/docker/go-units:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/accelerators:go_default_library",
"//vendor/github.com/google/cadvisor/cache/memory:go_default_library",
"//vendor/github.com/google/cadvisor/collector:go_default_library",
@@ -38,6 +37,7 @@ go_library(
"//vendor/github.com/google/cadvisor/version:go_default_library",
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/clock:go_default_library",
],
)
46
vendor/github.com/google/cadvisor/manager/container.go
generated
vendored
46
vendor/github.com/google/cadvisor/manager/container.go
generated
vendored
@@ -39,7 +39,7 @@ import (
"github.com/google/cadvisor/utils/cpuload"

units "github.com/docker/go-units"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/utils/clock"
)

@@ -47,7 +47,9 @@ import (
var enableLoadReader = flag.Bool("enable_load_reader", false, "Whether to enable cpu load reader")
var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings")

var cgroupPathRegExp = regexp.MustCompile(`devices[^:]*:(.*?)[,;$]`)
// cgroup type chosen to fetch the cgroup path of a process.
// Memory has been chosen, as it is one of the default cgroups that is enabled for most containers.
var cgroupPathRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`)

type containerInfo struct {
info.ContainerReference
@@ -185,8 +187,8 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) {
}
matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups))
if len(matches) != 2 {
glog.V(3).Infof("failed to get devices cgroup path from %q", cgroups)
// return root in case of failures - devices hierarchy might not be enabled.
klog.V(3).Infof("failed to get memory cgroup path from %q", cgroups)
// return root in case of failures - memory hierarchy might not be enabled.
return "/", nil
}
return string(matches[1]), nil
@@ -206,7 +208,7 @@ func (c *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte,
}
for _, pid := range pids {
filePath := path.Join(rootfs, "/proc", pid, "/root", filepath)
glog.V(3).Infof("Trying path %q", filePath)
klog.V(3).Infof("Trying path %q", filePath)
data, err := ioutil.ReadFile(filePath)
if err == nil {
return data, err
@@ -266,6 +268,10 @@ func (c *containerData) getContainerPids(inHostNamespace bool) ([]string, error)
func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) {
// report all processes for root.
isRoot := c.info.Name == "/"
rootfs := "/"
if !inHostNamespace {
rootfs = "/rootfs"
}
format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup"
out, err := c.getPsOutput(inHostNamespace, format)
if err != nil {
@@ -324,6 +330,15 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
cgroupPath = cgroup
}

var fdCount int
dirPath := path.Join(rootfs, "/proc", strconv.Itoa(pid), "fd")
fds, err := ioutil.ReadDir(dirPath)
if err != nil {
klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
continue
}
fdCount = len(fds)

if isRoot || c.info.Name == cgroup {
processes = append(processes, v2.ProcessInfo{
User: fields[0],
@@ -338,6 +353,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
RunningTime: fields[9],
Cmd: fields[10],
CgroupPath: cgroupPath,
FdCount: fdCount,
})
}
}
@@ -377,7 +393,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
// Create cpu load reader.
loadReader, err := cpuload.New()
if err != nil {
glog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err)
klog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err)
} else {
cont.loadReader = loadReader
}
@@ -390,7 +406,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
cont.summaryReader, err = summary.New(cont.info.Spec)
if err != nil {
cont.summaryReader = nil
glog.Warningf("Failed to create summary reader for %q: %v", ref.Name, err)
klog.Warningf("Failed to create summary reader for %q: %v", ref.Name, err)
}

return cont, nil
@@ -403,7 +419,7 @@ func (self *containerData) nextHousekeepingInterval() time.Duration {
stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2)
if err != nil {
if self.allowErrorLogging() {
glog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err)
klog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err)
}
} else if len(stats) == 2 {
// TODO(vishnuk): Use no processes as a signal.
@@ -433,7 +449,7 @@ func (c *containerData) housekeeping() {
if c.loadReader != nil {
err := c.loadReader.Start()
if err != nil {
glog.Warningf("Could not start cpu load stat collector for %q: %s", c.info.Name, err)
klog.Warningf("Could not start cpu load stat collector for %q: %s", c.info.Name, err)
}
defer c.loadReader.Stop()
}
@@ -445,7 +461,7 @@ func (c *containerData) housekeeping() {
}

// Housekeep every second.
glog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
klog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
houseKeepingTimer := c.clock.NewTimer(0 * time.Second)
defer houseKeepingTimer.Stop()
for {
@@ -466,7 +482,7 @@ func (c *containerData) housekeeping() {
stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples)
if err != nil {
if c.allowErrorLogging() {
glog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
klog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
}
} else if len(stats) < numSamples {
// Ignore, not enough stats yet.
@@ -483,7 +499,7 @@ func (c *containerData) housekeeping() {
usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
usageInHuman := units.HumanSize(float64(usageMemory))
// Don't set verbosity since this is already protected by the logUsage flag.
glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
klog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
}
}
houseKeepingTimer.Reset(c.nextHousekeepingInterval())
@@ -504,13 +520,13 @@ func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeepin
err := c.updateStats()
if err != nil {
if c.allowErrorLogging() {
glog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err)
klog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err)
}
}
// Log if housekeeping took too long.
duration := c.clock.Since(start)
if duration >= longHousekeeping {
glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
klog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
}
c.notifyOnDemand()
c.statsLastUpdatedTime = c.clock.Now()
@@ -584,7 +600,7 @@ func (c *containerData) updateStats() error {
err := c.summaryReader.AddSample(*stats)
if err != nil {
// Ignore summary errors for now.
glog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err)
klog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err)
}
}
var customStatsErr error
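Two mechanisms in the manager/container.go hunks above are worth illustrating: the process-cgroup lookup now keys off the memory hierarchy instead of devices, and GetProcessList counts open file descriptors by listing /proc/<pid>/fd. The following is a minimal, standalone sketch of both, not part of the vendored code; the sample cgroup string and the PID are made up.

package main

import (
	"fmt"
	"io/ioutil"
	"path"
	"regexp"
	"strconv"
)

// Same pattern as the updated cgroupPathRegExp in container.go.
var memoryCgroupRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`)

// memoryCgroupPath extracts the memory cgroup path from a ps-style cgroup field.
func memoryCgroupPath(cgroups string) string {
	matches := memoryCgroupRegExp.FindSubmatch([]byte(cgroups))
	if len(matches) != 2 {
		// Fall back to the root cgroup when the memory hierarchy is absent.
		return "/"
	}
	return string(matches[1])
}

// fdCount returns the number of open file descriptors for a PID by listing
// /proc/<pid>/fd, mirroring what GetProcessList now does per process.
func fdCount(rootfs string, pid int) (int, error) {
	fds, err := ioutil.ReadDir(path.Join(rootfs, "proc", strconv.Itoa(pid), "fd"))
	if err != nil {
		return 0, err
	}
	return len(fds), nil
}

func main() {
	sample := "11:memory:/kubepods/burstable/pod1234,10:devices:/kubepods"
	fmt.Println(memoryCgroupPath(sample)) // prints /kubepods/burstable/pod1234

	if n, err := fdCount("/", 1); err == nil {
		fmt.Println("open fds for pid 1:", n)
	}
}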
82
vendor/github.com/google/cadvisor/manager/manager.go
generated
vendored
82
vendor/github.com/google/cadvisor/manager/manager.go
generated
vendored
@@ -49,9 +49,9 @@ import (
"github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/version"

"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer/cgroups"
"golang.org/x/net/context"
"k8s.io/klog"
"k8s.io/utils/clock"
)

@@ -152,7 +152,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
if err != nil {
return nil, err
}
glog.V(2).Infof("cAdvisor running in container: %q", selfContainer)
klog.V(2).Infof("cAdvisor running in container: %q", selfContainer)

var (
dockerStatus info.DockerStatus
@@ -163,7 +163,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
dockerStatus = retryDockerStatus()

if tmpRktPath, err := rkt.RktPath(); err != nil {
glog.V(5).Infof("Rkt not connected: %v", err)
klog.V(5).Infof("Rkt not connected: %v", err)
} else {
rktPath = tmpRktPath
}
@@ -174,7 +174,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
}
crioInfo, err := crioClient.Info()
if err != nil {
glog.V(5).Infof("CRI-O not connected: %v", err)
klog.V(5).Infof("CRI-O not connected: %v", err)
}

context := fs.Context{
@@ -226,13 +226,13 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
return nil, err
}
newManager.machineInfo = *machineInfo
glog.V(1).Infof("Machine: %+v", newManager.machineInfo)
klog.V(1).Infof("Machine: %+v", newManager.machineInfo)

versionInfo, err := getVersionInfo()
if err != nil {
return nil, err
}
glog.V(1).Infof("Version: %+v", *versionInfo)
klog.V(1).Infof("Version: %+v", *versionInfo)

newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
return newManager, nil
@@ -250,9 +250,9 @@ func retryDockerStatus() info.DockerStatus {

switch err {
case context.DeadlineExceeded:
glog.Warningf("Timeout trying to communicate with docker during initialization, will retry")
klog.Warningf("Timeout trying to communicate with docker during initialization, will retry")
default:
glog.V(5).Infof("Docker not connected: %v", err)
klog.V(5).Infof("Docker not connected: %v", err)
return info.DockerStatus{}
}

@@ -298,12 +298,12 @@ type manager struct {
func (self *manager) Start() error {
err := docker.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err)
klog.V(5).Infof("Registration of the Docker container factory failed: %v.", err)
}

err = rkt.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the rkt container factory failed: %v", err)
klog.V(5).Infof("Registration of the rkt container factory failed: %v", err)
} else {
watcher, err := rktwatcher.NewRktContainerWatcher()
if err != nil {
@@ -314,27 +314,27 @@ func (self *manager) Start() error {

err = containerd.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the containerd container factory failed: %v", err)
klog.V(5).Infof("Registration of the containerd container factory failed: %v", err)
}

err = crio.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the crio container factory failed: %v", err)
klog.V(5).Infof("Registration of the crio container factory failed: %v", err)
}

err = mesos.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the mesos container factory failed: %v", err)
klog.V(5).Infof("Registration of the mesos container factory failed: %v", err)
}

err = systemd.Register(self, self.fsInfo, self.includedMetrics)
if err != nil {
glog.V(5).Infof("Registration of the systemd container factory failed: %v", err)
klog.V(5).Infof("Registration of the systemd container factory failed: %v", err)
}

err = raw.Register(self, self.fsInfo, self.includedMetrics, self.rawContainerCgroupPathPrefixWhiteList)
if err != nil {
glog.Errorf("Registration of the raw container factory failed: %v", err)
klog.Errorf("Registration of the raw container factory failed: %v", err)
}

rawWatcher, err := rawwatcher.NewRawContainerWatcher()
@@ -346,7 +346,7 @@ func (self *manager) Start() error {
// Watch for OOMs.
err = self.watchForNewOoms()
if err != nil {
glog.Warningf("Could not configure a source for OOM detection, disabling OOM events: %v", err)
klog.Warningf("Could not configure a source for OOM detection, disabling OOM events: %v", err)
}

// If there are no factories, don't start any housekeeping and serve the information we do have.
@@ -362,12 +362,12 @@ func (self *manager) Start() error {
if err != nil {
return err
}
glog.V(2).Infof("Starting recovery of all containers")
klog.V(2).Infof("Starting recovery of all containers")
err = self.detectSubcontainers("/")
if err != nil {
return err
}
glog.V(2).Infof("Recovery completed")
klog.V(2).Infof("Recovery completed")

// Watch for new container.
quitWatcher := make(chan error)
@@ -418,18 +418,18 @@ func (self *manager) globalHousekeeping(quit chan error) {
// Check for new containers.
err := self.detectSubcontainers("/")
if err != nil {
glog.Errorf("Failed to detect containers: %s", err)
klog.Errorf("Failed to detect containers: %s", err)
}

// Log if housekeeping took too long.
duration := time.Since(start)
if duration >= longHousekeeping {
glog.V(3).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration)
klog.V(3).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration)
}
case <-quit:
// Quit if asked to do so.
quit <- nil
glog.Infof("Exiting global housekeeping thread")
klog.Infof("Exiting global housekeeping thread")
return
}
}
@@ -630,7 +630,7 @@ func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[
if err != nil {
// Ignore the error because of race condition and return best-effort result.
if err == memory.ErrDataNotFound {
glog.Warningf("Error getting data for container %s because of race condition", name)
klog.Warningf("Error getting data for container %s because of race condition", name)
continue
}
return nil, err
@@ -890,7 +890,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
if err != nil {
return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err)
}
glog.V(4).Infof("Got config from %q: %q", v, configFile)
klog.V(4).Infof("Got config from %q: %q", v, configFile)

if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
@@ -968,7 +968,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
}
if !accept {
// ignoring this container.
glog.V(4).Infof("ignoring container %q", containerName)
klog.V(4).Infof("ignoring container %q", containerName)
return nil
}
collectorManager, err := collector.NewCollectorManager()
@@ -983,11 +983,11 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
}
devicesCgroupPath, err := handler.GetCgroupPath("devices")
if err != nil {
glog.Warningf("Error getting devices cgroup path: %v", err)
klog.Warningf("Error getting devices cgroup path: %v", err)
} else {
cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
if err != nil {
glog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
}
}

@@ -996,7 +996,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
collectorConfigs := collector.GetCollectorConfigs(labels)
err = m.registerCollectors(collectorConfigs, cont)
if err != nil {
glog.Warningf("Failed to register collectors for %q: %v", containerName, err)
klog.Warningf("Failed to register collectors for %q: %v", containerName, err)
}

// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
@@ -1008,7 +1008,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
}] = cont
}

glog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
klog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)

contSpec, err := cont.handler.GetSpec()
if err != nil {
@@ -1065,7 +1065,7 @@ func (m *manager) destroyContainerLocked(containerName string) error {
Name: alias,
})
}
glog.V(3).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
klog.V(3).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)

contRef, err := cont.handler.ContainerReference()
if err != nil {
@@ -1144,7 +1144,7 @@ func (m *manager) detectSubcontainers(containerName string) error {
for _, cont := range added {
err = m.createContainer(cont.Name, watcher.Raw)
if err != nil {
glog.Errorf("Failed to create existing container: %s: %s", cont.Name, err)
klog.Errorf("Failed to create existing container: %s: %s", cont.Name, err)
}
}

@@ -1152,7 +1152,7 @@ func (m *manager) detectSubcontainers(containerName string) error {
for _, cont := range removed {
err = m.destroyContainer(cont.Name)
if err != nil {
glog.Errorf("Failed to destroy existing container: %s: %s", cont.Name, err)
klog.Errorf("Failed to destroy existing container: %s: %s", cont.Name, err)
}
}

@@ -1192,7 +1192,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
err = self.destroyContainer(event.Name)
}
if err != nil {
glog.Warningf("Failed to process watch event %+v: %v", event, err)
klog.Warningf("Failed to process watch event %+v: %v", event, err)
}
case <-quit:
var errs partialFailure
@@ -1209,7 +1209,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
quit <- errs
} else {
quit <- nil
glog.Infof("Exiting thread watching subcontainers")
klog.Infof("Exiting thread watching subcontainers")
return
}
}
@@ -1219,7 +1219,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
}

func (self *manager) watchForNewOoms() error {
glog.V(2).Infof("Started watching for new ooms in manager")
klog.V(2).Infof("Started watching for new ooms in manager")
outStream := make(chan *oomparser.OomInstance, 10)
oomLog, err := oomparser.New()
if err != nil {
@@ -1237,9 +1237,9 @@ func (self *manager) watchForNewOoms() error {
}
err := self.eventHandler.AddEvent(newEvent)
if err != nil {
glog.Errorf("failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
klog.Errorf("failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
}
glog.V(3).Infof("Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath)
klog.V(3).Infof("Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath)

newEvent = &info.Event{
ContainerName: oomInstance.VictimContainerName,
@@ -1254,7 +1254,7 @@ func (self *manager) watchForNewOoms() error {
}
err = self.eventHandler.AddEvent(newEvent)
if err != nil {
glog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
klog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
}
}
}()
@@ -1285,12 +1285,12 @@ func parseEventsStoragePolicy() events.StoragePolicy {
for _, part := range parts {
items := strings.Split(part, "=")
if len(items) != 2 {
glog.Warningf("Unknown event storage policy %q when parsing max age", part)
klog.Warningf("Unknown event storage policy %q when parsing max age", part)
continue
}
dur, err := time.ParseDuration(items[1])
if err != nil {
glog.Warningf("Unable to parse event max age duration %q: %v", items[1], err)
klog.Warningf("Unable to parse event max age duration %q: %v", items[1], err)
continue
}
if items[0] == "default" {
@@ -1305,12 +1305,12 @@ func parseEventsStoragePolicy() events.StoragePolicy {
for _, part := range parts {
items := strings.Split(part, "=")
if len(items) != 2 {
glog.Warningf("Unknown event storage policy %q when parsing max event limit", part)
klog.Warningf("Unknown event storage policy %q when parsing max event limit", part)
continue
}
val, err := strconv.Atoi(items[1])
if err != nil {
glog.Warningf("Unable to parse integer from %q: %v", items[1], err)
klog.Warningf("Unable to parse integer from %q: %v", items[1], err)
continue
}
if items[0] == "default" {
4
vendor/github.com/google/cadvisor/manager/watcher/raw/BUILD
generated
vendored
4
vendor/github.com/google/cadvisor/manager/watcher/raw/BUILD
generated
vendored
@@ -7,11 +7,11 @@ go_library(
importpath = "github.com/google/cadvisor/manager/watcher/raw",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/container/common:go_default_library",
"//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
"//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
"//vendor/golang.org/x/exp/inotify:go_default_library",
"//vendor/github.com/sigma/go-inotify:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
12
vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go
generated
vendored
12
vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go
generated
vendored
@@ -26,9 +26,9 @@ import (
"github.com/google/cadvisor/container/common"
"github.com/google/cadvisor/container/libcontainer"
"github.com/google/cadvisor/manager/watcher"
inotify "github.com/sigma/go-inotify"

"github.com/golang/glog"
"golang.org/x/exp/inotify"
"k8s.io/klog"
)

type rawContainerWatcher struct {
@@ -84,10 +84,10 @@ func (self *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error
case event := <-self.watcher.Event():
err := self.processEvent(event, events)
if err != nil {
glog.Warningf("Error while processing event (%+v): %v", event, err)
klog.Warningf("Error while processing event (%+v): %v", event, err)
}
case err := <-self.watcher.Error():
glog.Warningf("Error while watching %q:", "/", err)
klog.Warningf("Error while watching %q: %v", "/", err)
case <-self.stopWatcher:
err := self.watcher.Close()
if err == nil {
@@ -126,7 +126,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
if cleanup {
_, err := self.watcher.RemoveWatch(containerName, dir)
if err != nil {
glog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
klog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
}
}
}()
@@ -143,7 +143,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve
subcontainerName := path.Join(containerName, entry.Name())
alreadyWatchingSubDir, err := self.watchDirectory(events, entryPath, subcontainerName)
if err != nil {
glog.Errorf("Failed to watch directory %q: %v", entryPath, err)
klog.Errorf("Failed to watch directory %q: %v", entryPath, err)
if os.IsNotExist(err) {
// The directory may have been removed before watching. Try to watch the other
// subdirectories. (https://github.com/kubernetes/kubernetes/issues/28997)
2
vendor/github.com/google/cadvisor/manager/watcher/rkt/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/manager/watcher/rkt/BUILD
generated
vendored
@@ -8,10 +8,10 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/rkt/api/v1alpha:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/container/rkt:go_default_library",
"//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
10
vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go
generated
vendored
10
vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go
generated
vendored
@@ -23,8 +23,8 @@ import (
"github.com/google/cadvisor/manager/watcher"

rktapi "github.com/coreos/rkt/api/v1alpha"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/klog"
)

type rktContainerWatcher struct {
@@ -53,7 +53,7 @@ func (self *rktContainerWatcher) Stop() error {
}

func (self *rktContainerWatcher) detectRktContainers(events chan watcher.ContainerEvent) {
glog.V(1).Infof("Starting detectRktContainers thread")
klog.V(1).Infof("Starting detectRktContainers thread")
ticker := time.Tick(10 * time.Second)
curpods := make(map[string]*rktapi.Pod)

@@ -62,13 +62,13 @@ func (self *rktContainerWatcher) detectRktContainers(events chan watcher.Contain
case <-ticker:
pods, err := listRunningPods()
if err != nil {
glog.Errorf("detectRktContainers: listRunningPods failed: %v", err)
klog.Errorf("detectRktContainers: listRunningPods failed: %v", err)
continue
}
curpods = self.syncRunningPods(pods, events, curpods)

case <-self.stopWatcher:
glog.Infof("Exiting rktContainer Thread")
klog.Infof("Exiting rktContainer Thread")
return
}
}
@@ -92,7 +92,7 @@ func (self *rktContainerWatcher) syncRunningPods(pods []*rktapi.Pod, events chan
for id, pod := range curpods {
if _, ok := newpods[id]; !ok {
for _, cgroup := range podToCgroup(pod) {
glog.V(2).Infof("cgroup to delete = %v", cgroup)
klog.V(2).Infof("cgroup to delete = %v", cgroup)
self.sendDestroyEvent(cgroup, events)
}
}
2
vendor/github.com/google/cadvisor/metrics/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/metrics/BUILD
generated
vendored
@@ -7,10 +7,10 @@ go_library(
importpath = "github.com/google/cadvisor/metrics",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/container:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
63
vendor/github.com/google/cadvisor/metrics/prometheus.go
generated
vendored
63
vendor/github.com/google/cadvisor/metrics/prometheus.go
generated
vendored
@@ -22,8 +22,8 @@ import (
"github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1"

"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog"
)

// infoProvider will usually be manager.Manager, but can be swapped out for testing.
@@ -109,6 +109,7 @@ type PrometheusCollector struct {
errors prometheus.Gauge
containerMetrics []containerMetric
containerLabelsFunc ContainerLabelsFunc
includedMetrics container.MetricSet
}

// NewPrometheusCollector returns a new PrometheusCollector. The passed
@@ -137,6 +138,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
},
},
},
includedMetrics: includedMetrics,
}
if includedMetrics.Has(container.CpuUsageMetrics) {
c.containerMetrics = append(c.containerMetrics, []containerMetric{
@@ -336,7 +338,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
name: "container_memory_failures_total",
help: "Cumulative count of memory allocation failures.",
valueType: prometheus.CounterValue,
extraLabels: []string{"type", "scope"},
extraLabels: []string{"failure_type", "scope"},
getValues: func(s *info.ContainerStats) metricValues {
return metricValues{
{
@@ -835,6 +837,26 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
},
}...)
}
if includedMetrics.Has(container.ProcessMetrics) {
c.containerMetrics = append(c.containerMetrics, []containerMetric{
{
name: "container_processes",
help: "Number of processes running inside the container.",
valueType: prometheus.GaugeValue,
getValues: func(s *info.ContainerStats) metricValues {
return metricValues{{value: float64(s.Processes.ProcessCount)}}
},
},
{
name: "container_file_descriptors",
help: "Number of open file descriptors for the container.",
valueType: prometheus.GaugeValue,
getValues: func(s *info.ContainerStats) metricValues {
return metricValues{{value: float64(s.Processes.FdCount)}}
},
},
}...)
}

return c
}
@@ -917,7 +939,7 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
if err != nil {
c.errors.Set(1)
glog.Warningf("Couldn't get containers: %s", err)
klog.Warningf("Couldn't get containers: %s", err)
return
}
rawLabels := map[string]struct{}{}
@@ -926,10 +948,11 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
rawLabels[l] = struct{}{}
}
}
for _, container := range containers {

for _, cont := range containers {
values := make([]string, 0, len(rawLabels))
labels := make([]string, 0, len(rawLabels))
containerLabels := c.containerLabelsFunc(container)
containerLabels := c.containerLabelsFunc(cont)
for l := range rawLabels {
labels = append(labels, sanitizeLabelName(l))
values = append(values, containerLabels[l])
@@ -937,35 +960,35 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)

// Container spec
desc := prometheus.NewDesc("container_start_time_seconds", "Start time of the container since unix epoch in seconds.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.CreationTime.Unix()), values...)

if container.Spec.HasCpu {
if cont.Spec.HasCpu {
desc = prometheus.NewDesc("container_spec_cpu_period", "CPU period of the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Period), values...)
if container.Spec.Cpu.Quota != 0 {
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Period), values...)
if cont.Spec.Cpu.Quota != 0 {
desc = prometheus.NewDesc("container_spec_cpu_quota", "CPU quota of the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Quota), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Quota), values...)
}
desc := prometheus.NewDesc("container_spec_cpu_shares", "CPU share of the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Limit), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Limit), values...)

}
if container.Spec.HasMemory {
if cont.Spec.HasMemory {
desc := prometheus.NewDesc("container_spec_memory_limit_bytes", "Memory limit for the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.Limit), values...)
desc = prometheus.NewDesc("container_spec_memory_swap_limit_bytes", "Memory swap limit for the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.SwapLimit), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.SwapLimit), values...)
desc = prometheus.NewDesc("container_spec_memory_reservation_limit_bytes", "Memory reservation limit for the container.", labels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Reservation), values...)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.Reservation), values...)
}

// Now for the actual metrics
if len(container.Stats) == 0 {
if len(cont.Stats) == 0 {
continue
}
stats := container.Stats[0]
stats := cont.Stats[0]
for _, cm := range c.containerMetrics {
if cm.condition != nil && !cm.condition(container.Spec) {
if cm.condition != nil && !cm.condition(cont.Spec) {
continue
}
desc := cm.desc(labels)
@@ -980,7 +1003,7 @@ func (c *PrometheusCollector) collectVersionInfo(ch chan<- prometheus.Metric) {
versionInfo, err := c.infoProvider.GetVersionInfo()
if err != nil {
c.errors.Set(1)
glog.Warningf("Couldn't get version info: %s", err)
klog.Warningf("Couldn't get version info: %s", err)
return
}
ch <- prometheus.MustNewConstMetric(versionInfoDesc, prometheus.GaugeValue, 1, []string{versionInfo.KernelVersion, versionInfo.ContainerOsVersion, versionInfo.DockerVersion, versionInfo.CadvisorVersion, versionInfo.CadvisorRevision}...)
@@ -990,7 +1013,7 @@ func (c *PrometheusCollector) collectMachineInfo(ch chan<- prometheus.Metric) {
machineInfo, err := c.infoProvider.GetMachineInfo()
if err != nil {
c.errors.Set(1)
glog.Warningf("Couldn't get machine info: %s", err)
klog.Warningf("Couldn't get machine info: %s", err)
return
}
ch <- prometheus.MustNewConstMetric(machineInfoCoresDesc, prometheus.GaugeValue, float64(machineInfo.NumCores))
2
vendor/github.com/google/cadvisor/utils/cloudinfo/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/utils/cloudinfo/BUILD
generated
vendored
@@ -16,8 +16,8 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/ec2metadata:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
4
vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go
generated
vendored
4
vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go
generated
vendored
@@ -21,7 +21,7 @@ import (
info "github.com/google/cadvisor/info/v1"

"cloud.google.com/go/compute/metadata"
"github.com/golang/glog"
"k8s.io/klog"
)

const (
@@ -32,7 +32,7 @@ const (
func onGCE() bool {
data, err := ioutil.ReadFile(gceProductName)
if err != nil {
glog.V(2).Infof("Error while reading product_name: %v", err)
klog.V(2).Infof("Error while reading product_name: %v", err)
return false
}
return strings.Contains(string(data), google)
2
vendor/github.com/google/cadvisor/utils/cpuload/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/utils/cpuload/BUILD
generated
vendored
@@ -7,9 +7,9 @@ go_library(
importpath = "github.com/google/cadvisor/utils/cpuload",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/google/cadvisor/utils/cpuload/netlink:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
4
vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go
generated
vendored
4
vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go
generated
vendored
@@ -19,8 +19,8 @@ import (

info "github.com/google/cadvisor/info/v1"

"github.com/golang/glog"
"github.com/google/cadvisor/utils/cpuload/netlink"
"k8s.io/klog"
)

type CpuLoadReader interface {
@@ -41,6 +41,6 @@ func New() (CpuLoadReader, error) {
if err != nil {
return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err)
}
glog.V(4).Info("Using a netlink-based load reader")
klog.V(4).Info("Using a netlink-based load reader")
return reader, nil
}
2
vendor/github.com/google/cadvisor/utils/cpuload/netlink/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/utils/cpuload/netlink/BUILD
generated
vendored
@@ -13,8 +13,8 @@ go_library(
importpath = "github.com/google/cadvisor/utils/cpuload/netlink",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
6
vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go
generated
vendored
6
vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go
generated
vendored
@@ -20,7 +20,7 @@ import (

info "github.com/google/cadvisor/info/v1"

"github.com/golang/glog"
"k8s.io/klog"
)

type NetlinkReader struct {
@@ -38,7 +38,7 @@ func New() (*NetlinkReader, error) {
if err != nil {
return nil, fmt.Errorf("failed to get netlink family id for task stats: %s", err)
}
glog.V(4).Infof("Family id for taskstats: %d", id)
klog.V(4).Infof("Family id for taskstats: %d", id)
return &NetlinkReader{
familyId: id,
conn: conn,
@@ -75,6 +75,6 @@ func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats,
if err != nil {
return info.LoadStats{}, err
}
glog.V(4).Infof("Task stats for %q: %+v", path, stats)
klog.V(4).Infof("Task stats for %q: %+v", path, stats)
return stats, nil
}
2
vendor/github.com/google/cadvisor/utils/oomparser/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/utils/oomparser/BUILD
generated
vendored
@@ -8,7 +8,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/euank/go-kmsg-parser/kmsgparser:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
14
vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go
generated
vendored
14
vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go
generated
vendored
@@ -22,7 +22,7 @@ import (
"github.com/euank/go-kmsg-parser/kmsgparser"
"github.com/golang/glog"
"k8s.io/klog"
)

var (
@@ -107,11 +107,11 @@ func (self *OomParser) StreamOoms(outStream chan<- *OomInstance) {
for msg := range kmsgEntries {
err := getContainerName(msg.Message, oomCurrentInstance)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
finished, err := getProcessNamePid(msg.Message, oomCurrentInstance)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
if finished {
oomCurrentInstance.TimeOfDeath = msg.Timestamp
@@ -122,7 +122,7 @@ func (self *OomParser) StreamOoms(outStream chan<- *OomInstance) {
}
}
// Should not happen
glog.Errorf("exiting analyzeLines. OOM events will not be reported.")
klog.Errorf("exiting analyzeLines. OOM events will not be reported.")
}

// initializes an OomParser object. Returns an OomParser object and an error.
@@ -140,11 +140,11 @@ type glogAdapter struct{}
var _ kmsgparser.Logger = glogAdapter{}

func (glogAdapter) Infof(format string, args ...interface{}) {
glog.V(4).Infof(format, args)
klog.V(4).Infof(format, args...)
}
func (glogAdapter) Warningf(format string, args ...interface{}) {
glog.V(2).Infof(format, args)
klog.V(2).Infof(format, args...)
}
func (glogAdapter) Errorf(format string, args ...interface{}) {
glog.Warningf(format, args)
klog.Warningf(format, args...)
}

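One hunk above is more than a rename: the old adapter passed the args slice as a single value (glog.V(4).Infof(format, args)), so the whole slice was printed against the first verb, while the new code spreads it with args... . A small standalone illustration of the difference (function names here are made up for the example):

package main

import "fmt"

// forwardWrong passes the slice as one argument, so each format verb after
// the first sees nothing and the first sees the whole []interface{}.
func forwardWrong(format string, args ...interface{}) string {
	return fmt.Sprintf(format, args)
}

// forwardRight spreads the slice so each argument lines up with its verb,
// which is what the klog adapter now does with args...
func forwardRight(format string, args ...interface{}) string {
	return fmt.Sprintf(format, args...)
}

func main() {
	fmt.Println(forwardWrong("pid=%d name=%s", 7, "oom")) // pid=%!d([]interface {}=[7 oom]) name=%!s(MISSING)
	fmt.Println(forwardRight("pid=%d name=%s", 7, "oom")) // pid=7 name=oom
}
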
2
vendor/github.com/google/cadvisor/zfs/BUILD
generated
vendored
2
vendor/github.com/google/cadvisor/zfs/BUILD
generated
vendored
@@ -7,8 +7,8 @@ go_library(
importpath = "github.com/google/cadvisor/zfs",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/mistifyio/go-zfs:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

12
vendor/github.com/google/cadvisor/zfs/watcher.go
generated
vendored
12
vendor/github.com/google/cadvisor/zfs/watcher.go
generated
vendored
@@ -18,8 +18,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
zfs "github.com/mistifyio/go-zfs"
"k8s.io/klog"
)

// zfsWatcher maintains a cache of filesystem -> usage stats for a
@@ -49,7 +49,7 @@ func NewZfsWatcher(filesystem string) (*ZfsWatcher, error) {
func (w *ZfsWatcher) Start() {
err := w.Refresh()
if err != nil {
glog.Errorf("encountered error refreshing zfs watcher: %v", err)
klog.Errorf("encountered error refreshing zfs watcher: %v", err)
}

for {
@@ -60,12 +60,12 @@ func (w *ZfsWatcher) Start() {
start := time.Now()
err = w.Refresh()
if err != nil {
glog.Errorf("encountered error refreshing zfs watcher: %v", err)
klog.Errorf("encountered error refreshing zfs watcher: %v", err)
}

// print latency for refresh
duration := time.Since(start)
glog.V(5).Infof("zfs(%d) took %s", start.Unix(), duration)
klog.V(5).Infof("zfs(%d) took %s", start.Unix(), duration)
}
}
}
@@ -95,12 +95,12 @@ func (w *ZfsWatcher) Refresh() error {
newCache := make(map[string]uint64)
parent, err := zfs.GetDataset(w.filesystem)
if err != nil {
glog.Errorf("encountered error getting zfs filesystem: %s: %v", w.filesystem, err)
klog.Errorf("encountered error getting zfs filesystem: %s: %v", w.filesystem, err)
return err
}
children, err := parent.Children(0)
if err != nil {
glog.Errorf("encountered error getting children of zfs filesystem: %s: %v", w.filesystem, err)
klog.Errorf("encountered error getting children of zfs filesystem: %s: %v", w.filesystem, err)
return err
}

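The watcher.go changes above are again mechanical, but the surrounding pattern — refresh once, then refresh on a timer, logging failures and reporting per-refresh latency at V(5) — is easy to lose across the hunks. Below is a condensed, hypothetical sketch of that loop; the ticker period, stop channel, and refresh stub are assumptions, not the vendored implementation.

package main

import (
	"time"

	"k8s.io/klog"
)

// refresh stands in for ZfsWatcher.Refresh; here it just pretends to succeed.
func refresh() error { return nil }

// watch mirrors the shape of the Start loop above: refresh once up front,
// then periodically, logging errors and per-refresh latency at V(5).
func watch(period time.Duration, stop <-chan struct{}) {
	if err := refresh(); err != nil {
		klog.Errorf("encountered error refreshing zfs watcher: %v", err)
	}
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			start := time.Now()
			if err := refresh(); err != nil {
				klog.Errorf("encountered error refreshing zfs watcher: %v", err)
			}
			// print latency for refresh
			klog.V(5).Infof("zfs refresh took %s", time.Since(start))
		}
	}
}

func main() {
	klog.InitFlags(nil)
	stop := make(chan struct{})
	go watch(time.Minute, stop)
	time.Sleep(2 * time.Second) // illustrative only; a real caller manages shutdown properly
	close(stop)
}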