update cadvisor godeps
vendor/github.com/google/cadvisor/container/common/helpers.go (generated, vendored): 68 lines changed

@@ -223,3 +223,71 @@ func ListContainers(name string, cgroupPaths map[string]string, listType contain
 	return ret, nil
 }
+
+// AssignDeviceNamesToDiskStats assigns the Device field on the provided DiskIoStats by looking up
+// the device major and minor identifiers in the provided device namer.
+func AssignDeviceNamesToDiskStats(namer DeviceNamer, stats *info.DiskIoStats) {
+	assignDeviceNamesToPerDiskStats(
+		namer,
+		stats.IoMerged,
+		stats.IoQueued,
+		stats.IoServiceBytes,
+		stats.IoServiceTime,
+		stats.IoServiced,
+		stats.IoTime,
+		stats.IoWaitTime,
+		stats.Sectors,
+	)
+}
+
+// assignDeviceNamesToPerDiskStats looks up device names for the provided stats, caching names
+// if necessary.
+func assignDeviceNamesToPerDiskStats(namer DeviceNamer, diskStats ...[]info.PerDiskStats) {
+	devices := make(deviceIdentifierMap)
+	for _, stats := range diskStats {
+		for i, stat := range stats {
+			stats[i].Device = devices.Find(stat.Major, stat.Minor, namer)
+		}
+	}
+}
+
+// DeviceNamer returns string names for devices by their major and minor id.
+type DeviceNamer interface {
+	// DeviceName returns the name of the device by its major and minor ids, or false if no
+	// such device is recognized.
+	DeviceName(major, minor uint64) (string, bool)
+}
+
+type MachineInfoNamer info.MachineInfo
+
+func (n *MachineInfoNamer) DeviceName(major, minor uint64) (string, bool) {
+	for _, info := range n.DiskMap {
+		if info.Major == major && info.Minor == minor {
+			return "/dev/" + info.Name, true
+		}
+	}
+	for _, info := range n.Filesystems {
+		if info.DeviceMajor == major && info.DeviceMinor == minor {
+			return info.Device, true
+		}
+	}
+	return "", false
+}
+
+type deviceIdentifier struct {
+	major uint64
+	minor uint64
+}
+
+type deviceIdentifierMap map[deviceIdentifier]string
+
+// Find locates the device name by device identifier, caching the result as necessary.
+func (m deviceIdentifierMap) Find(major, minor uint64, namer DeviceNamer) string {
+	d := deviceIdentifier{major, minor}
+	if s, ok := m[d]; ok {
+		return s
+	}
+	s, _ := namer.DeviceName(major, minor)
+	m[d] = s
+	return s
+}
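Reviewer note: the helpers above attach a human-readable device name to every PerDiskStats entry, keyed by major:minor, and deviceIdentifierMap memoizes lookups so the namer is consulted at most once per device. A minimal usage sketch against the vendored packages; the staticNamer type and the device numbers are illustrative, not part of the commit:

package main

import (
	"fmt"

	"github.com/google/cadvisor/container/common"
	info "github.com/google/cadvisor/info/v1"
)

// staticNamer is a toy DeviceNamer backed by a fixed table, standing in
// for *common.MachineInfoNamer.
type staticNamer map[[2]uint64]string

func (n staticNamer) DeviceName(major, minor uint64) (string, bool) {
	s, ok := n[[2]uint64{major, minor}]
	return s, ok
}

func main() {
	stats := info.DiskIoStats{
		IoServiceBytes: []info.PerDiskStats{
			{Major: 8, Minor: 0, Stats: map[string]uint64{"Read": 4096}},
		},
	}
	namer := staticNamer{{8, 0}: "/dev/sda"}
	common.AssignDeviceNamesToDiskStats(namer, &stats)
	fmt.Println(stats.IoServiceBytes[0].Device) // "/dev/sda"
}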
vendor/github.com/google/cadvisor/container/docker/docker.go (generated, vendored): 5 lines changed

@@ -37,7 +37,10 @@ func Status() (v1.DockerStatus, error) {
 	if err != nil {
 		return v1.DockerStatus{}, err
 	}
+	return StatusFromDockerInfo(dockerInfo), nil
+}
+
+func StatusFromDockerInfo(dockerInfo dockertypes.Info) v1.DockerStatus {
 	out := v1.DockerStatus{}
 	out.Version = VersionString()
 	out.APIVersion = APIVersionString()
@@ -53,7 +56,7 @@ func Status() (v1.DockerStatus, error) {
 	for _, v := range dockerInfo.DriverStatus {
 		out.DriverStatus[v[0]] = v[1]
 	}
-	return out, nil
+	return out
 }

 func Images() ([]v1.DockerImage, error) {
vendor/github.com/google/cadvisor/container/docker/factory.go (generated, vendored): 12 lines changed

@@ -84,6 +84,7 @@ const (
 	devicemapperStorageDriver storageDriver = "devicemapper"
 	aufsStorageDriver         storageDriver = "aufs"
 	overlayStorageDriver      storageDriver = "overlay"
+	overlay2StorageDriver     storageDriver = "overlay2"
 	zfsStorageDriver          storageDriver = "zfs"
 )
@@ -107,6 +108,7 @@ type dockerFactory struct {

 	ignoreMetrics container.MetricSet

+	thinPoolName    string
 	thinPoolWatcher *devicemapper.ThinPoolWatcher

 	zfsWatcher *zfs.ZfsWatcher
@@ -136,6 +138,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
 		metadataEnvs,
 		self.dockerVersion,
 		self.ignoreMetrics,
+		self.thinPoolName,
 		self.thinPoolWatcher,
 		self.zfsWatcher,
 	)
@@ -323,12 +326,18 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
 	}

-	var thinPoolWatcher *devicemapper.ThinPoolWatcher
+	var (
+		thinPoolWatcher *devicemapper.ThinPoolWatcher
+		thinPoolName    string
+	)
 	if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver {
 		thinPoolWatcher, err = startThinPoolWatcher(dockerInfo)
 		if err != nil {
 			glog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
 		}
+
+		status := StatusFromDockerInfo(*dockerInfo)
+		thinPoolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
 	}

 	var zfsWatcher *zfs.ZfsWatcher
@@ -350,6 +359,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		storageDriver:   storageDriver(dockerInfo.Driver),
 		storageDir:      RootDir(),
 		ignoreMetrics:   ignoreMetrics,
+		thinPoolName:    thinPoolName,
 		thinPoolWatcher: thinPoolWatcher,
 		zfsWatcher:      zfsWatcher,
 	}
vendor/github.com/google/cadvisor/container/docker/handler.go (generated, vendored): 43 lines changed

@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"path"
+	"strconv"
 	"strings"
 	"time"
@@ -112,6 +113,9 @@ type dockerContainerHandler struct {

 	// zfs watcher
 	zfsWatcher *zfs.ZfsWatcher
+
+	// container restart count
+	restartCount int
 }

 var _ container.ContainerHandler = &dockerContainerHandler{}
@@ -146,6 +150,7 @@ func newDockerContainerHandler(
 	metadataEnvs []string,
 	dockerVersion []int,
 	ignoreMetrics container.MetricSet,
+	thinPoolName string,
 	thinPoolWatcher *devicemapper.ThinPoolWatcher,
 	zfsWatcher *zfs.ZfsWatcher,
 ) (container.ContainerHandler, error) {
@@ -180,18 +185,18 @@ func newDockerContainerHandler(
 		return nil, err
 	}

-	// Determine the rootfs storage dir OR the pool name to determine the device
+	// Determine the rootfs storage dir OR the pool name to determine the device.
+	// For devicemapper, we only need the thin pool name, and that is passed in to this call
 	var (
 		rootfsStorageDir string
-		poolName         string
 		zfsFilesystem    string
 		zfsParent        string
 	)
 	switch storageDriver {
 	case aufsStorageDriver:
 		rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
-	case overlayStorageDriver:
-		rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
+	case overlayStorageDriver, overlay2StorageDriver:
+		rootfsStorageDir = path.Join(storageDir, string(storageDriver), rwLayerID)
 	case zfsStorageDriver:
 		status, err := Status()
 		if err != nil {
@@ -199,13 +204,6 @@ func newDockerContainerHandler(
 		}
 		zfsParent = status.DriverStatus[dockerutil.DriverStatusParentDataset]
 		zfsFilesystem = path.Join(zfsParent, rwLayerID)
-	case devicemapperStorageDriver:
-		status, err := Status()
-		if err != nil {
-			return nil, fmt.Errorf("unable to determine docker status: %v", err)
-		}
-
-		poolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
 	}

 	// TODO: extract object mother method
@@ -219,7 +217,7 @@ func newDockerContainerHandler(
 		storageDriver:    storageDriver,
 		fsInfo:           fsInfo,
 		rootFs:           rootFs,
-		poolName:         poolName,
+		poolName:         thinPoolName,
 		zfsFilesystem:    zfsFilesystem,
 		rootfsStorageDir: rootfsStorageDir,
 		envs:             make(map[string]string),
@@ -248,6 +246,7 @@ func newDockerContainerHandler(
 	handler.image = ctnr.Config.Image
 	handler.networkMode = ctnr.HostConfig.NetworkMode
 	handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]
+	handler.restartCount = ctnr.RestartCount

 	// Obtain the IP address for the container.
 	// If the NetworkMode starts with 'container:' then we need to use the IP address of the container specified.
@@ -383,6 +382,10 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 	spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)

 	spec.Labels = self.labels
+	// Only add the restartcount label if it's greater than 0
+	if self.restartCount > 0 {
+		spec.Labels["restartcount"] = strconv.Itoa(self.restartCount)
+	}
 	spec.Envs = self.envs
 	spec.Image = self.image
@@ -390,6 +393,15 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 }

 func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
+	mi, err := self.machineInfoFactory.GetMachineInfo()
+	if err != nil {
+		return err
+	}
+
+	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
+	}
+
 	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
@@ -399,7 +411,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 		// Device has to be the pool name to correlate with the device name as
 		// set in the machine info filesystems.
 		device = self.poolName
-	case aufsStorageDriver, overlayStorageDriver:
+	case aufsStorageDriver, overlayStorageDriver, overlay2StorageDriver:
 		deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
 		if err != nil {
 			return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
@@ -411,11 +423,6 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return nil
 	}

-	mi, err := self.machineInfoFactory.GetMachineInfo()
-	if err != nil {
-		return err
-	}
-
 	var (
 		limit  uint64
 		fsType string
vendor/github.com/google/cadvisor/container/factory.go (generated, vendored): 1 line changed

@@ -48,6 +48,7 @@ const (
 	DiskUsageMetrics       MetricKind = "disk"
 	NetworkUsageMetrics    MetricKind = "network"
 	NetworkTcpUsageMetrics MetricKind = "tcp"
+	NetworkUdpUsageMetrics MetricKind = "udp"
 	AppMetrics             MetricKind = "app"
 )
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored): 84 lines changed

@@ -17,6 +17,7 @@ package libcontainer
 import (
 	"bufio"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"path"
@@ -118,6 +119,21 @@ func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetri
 			stats.Network.Tcp6 = t6
 		}
 	}
+	if !ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
+		u, err := udpStatsFromProc(rootFs, pid, "net/udp")
+		if err != nil {
+			glog.V(2).Infof("Unable to get udp stats from pid %d: %v", pid, err)
+		} else {
+			stats.Network.Udp = u
+		}
+
+		u6, err := udpStatsFromProc(rootFs, pid, "net/udp6")
+		if err != nil {
+			glog.V(2).Infof("Unable to get udp6 stats from pid %d: %v", pid, err)
+		} else {
+			stats.Network.Udp6 = u6
+		}
+	}

 	// For backwards compatibility.
 	if len(stats.Network.Interfaces) > 0 {
@@ -291,6 +307,74 @@ func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {
 	return stats, nil
 }

+func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
+	var err error
+	var udpStats info.UdpStat
+
+	udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)
+
+	r, err := os.Open(udpStatsFile)
+	if err != nil {
+		return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
+	}
+
+	udpStats, err = scanUdpStats(r)
+	if err != nil {
+		return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
+	}
+
+	return udpStats, nil
+}
+
+func scanUdpStats(r io.Reader) (info.UdpStat, error) {
+	var stats info.UdpStat
+
+	scanner := bufio.NewScanner(r)
+	scanner.Split(bufio.ScanLines)
+
+	// Discard header line
+	if b := scanner.Scan(); !b {
+		return stats, scanner.Err()
+	}
+
+	listening := uint64(0)
+	dropped := uint64(0)
+	rxQueued := uint64(0)
+	txQueued := uint64(0)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
+
+		listening++
+
+		fs := strings.Fields(line)
+		if len(fs) != 13 {
+			continue
+		}
+
+		rx, tx := uint64(0), uint64(0)
+		fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
+		rxQueued += rx
+		txQueued += tx
+
+		d, err := strconv.Atoi(string(fs[12]))
+		if err != nil {
+			continue
+		}
+		dropped += uint64(d)
+	}
+
+	stats = info.UdpStat{
+		Listen:   listening,
+		Dropped:  dropped,
+		RxQueued: rxQueued,
+		TxQueued: txQueued,
+	}
+
+	return stats, nil
+}
+
 func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
 	pids, err := cgroupManager.GetPids()
 	if err != nil {
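Reviewer note: scanUdpStats above derives UdpStat by scanning /proc/net/udp: one socket per entry line, hex queue sums from field 4 (the tx_queue:rx_queue column per the format comment), and drops from field 12. A self-contained sketch of the same parsing approach with a sample proc line (the local udpStat mirror type and the sample values are illustrative):

package main

import (
	"bufio"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// udpStat mirrors the fields cadvisor's info.UdpStat aggregates.
type udpStat struct {
	Listen, Dropped, RxQueued, TxQueued uint64
}

func parseUdp(r io.Reader) (udpStat, error) {
	var s udpStat
	sc := bufio.NewScanner(r)
	sc.Scan() // discard the header line
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		s.Listen++ // each entry line is one socket
		if len(fields) != 13 {
			continue
		}
		var tx, rx uint64
		fmt.Sscanf(fields[4], "%X:%X", &tx, &rx) // tx_queue:rx_queue, hex
		s.TxQueued += tx
		s.RxQueued += rx
		if d, err := strconv.Atoi(fields[12]); err == nil { // drops column
			s.Dropped += uint64(d)
		}
	}
	return s, sc.Err()
}

func main() {
	sample := "  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops\n" +
		"   0: 00000000:0035 00000000:0000 07 00000000:00000200 00:00000000 00000000     0        0 12345 2 ffff880000000000 3\n"
	s, _ := parseUdp(strings.NewReader(sample))
	fmt.Printf("%+v\n", s) // {Listen:1 Dropped:3 RxQueued:512 TxQueued:0}
}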
vendor/github.com/google/cadvisor/container/raw/handler.go (generated, vendored): 27 lines changed

@@ -197,6 +197,7 @@ func fsToFsStats(fs *fs.Fs) info.FsStats {
 }

 func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
+	var allFs []fs.Fs
 	// Get Filesystem information only for the root cgroup.
 	if isRootCgroup(self.name) {
 		filesystems, err := self.fsInfo.GetGlobalFsInfo()
@@ -207,6 +208,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
 			fs := filesystems[i]
 			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
 		}
+		allFs = filesystems
 	} else if len(self.externalMounts) > 0 {
 		var mountSet map[string]struct{}
 		mountSet = make(map[string]struct{})
@@ -221,7 +223,10 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
 			fs := filesystems[i]
 			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
 		}
+		allFs = filesystems
 	}
+
+	common.AssignDeviceNamesToDiskStats(&fsNamer{fs: allFs, factory: self.machineInfoFactory}, &stats.DiskIo)
 	return nil
 }

@@ -272,3 +277,25 @@ func (self *rawContainerHandler) Exists() bool {
 func (self *rawContainerHandler) Type() container.ContainerType {
 	return container.ContainerTypeRaw
 }
+
+type fsNamer struct {
+	fs      []fs.Fs
+	factory info.MachineInfoFactory
+	info    common.DeviceNamer
+}
+
+func (n *fsNamer) DeviceName(major, minor uint64) (string, bool) {
+	for _, info := range n.fs {
+		if uint64(info.Major) == major && uint64(info.Minor) == minor {
+			return info.Device, true
+		}
+	}
+	if n.info == nil {
+		mi, err := n.factory.GetMachineInfo()
+		if err != nil {
+			return "", false
+		}
+		n.info = (*common.MachineInfoNamer)(mi)
+	}
+	return n.info.DeviceName(major, minor)
+}
vendor/github.com/google/cadvisor/container/rkt/handler.go (generated, vendored): 13 lines changed

@@ -202,6 +202,15 @@ func (handler *rktContainerHandler) GetSpec() (info.ContainerSpec, error) {
 }

 func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error {
+	mi, err := handler.machineInfoFactory.GetMachineInfo()
+	if err != nil {
+		return err
+	}
+
+	if !handler.ignoreMetrics.Has(container.DiskIOMetrics) {
+		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
+	}
+
 	if handler.ignoreMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
@@ -211,10 +220,6 @@ func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return err
 	}

-	mi, err := handler.machineInfoFactory.GetMachineInfo()
-	if err != nil {
-		return err
-	}
 	var limit uint64 = 0

 	// Use capacity as limit.
vendor/github.com/google/cadvisor/fs/fs.go (generated, vendored): 18 lines changed

@@ -266,7 +266,7 @@ func getDockerImagePaths(context Context) map[string]struct{} {

 	// TODO(rjnagal): Detect docker root and graphdriver directories from docker info.
 	dockerRoot := context.Docker.Root
-	for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay", "zfs"} {
+	for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay", "overlay2", "zfs"} {
 		dockerImagePaths[path.Join(dockerRoot, dir)] = struct{}{}
 	}
 	for dockerRoot != "/" && dockerRoot != "." {
@@ -455,11 +455,15 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
 }

 func (self *RealFsInfo) GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
+	claimToken()
+	defer releaseToken()
+	return GetDirDiskUsage(dir, timeout)
+}
+
+func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
 	if dir == "" {
 		return 0, fmt.Errorf("invalid directory")
 	}
-	claimToken()
-	defer releaseToken()
 	cmd := exec.Command("nice", "-n", "19", "du", "-s", dir)
 	stdoutp, err := cmd.StdoutPipe()
 	if err != nil {
@@ -496,13 +500,17 @@ func (self *RealFsInfo) GetDirDiskUsage(dir string, timeout time.Duration) (uint
 }

 func (self *RealFsInfo) GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
+	claimToken()
+	defer releaseToken()
+	return GetDirInodeUsage(dir, timeout)
+}
+
+func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
 	if dir == "" {
 		return 0, fmt.Errorf("invalid directory")
 	}
 	var counter byteCounter
 	var stderr bytes.Buffer
-	claimToken()
-	defer releaseToken()
 	findCmd := exec.Command("find", dir, "-xdev", "-printf", ".")
 	findCmd.Stdout, findCmd.Stderr = &counter, &stderr
 	if err := findCmd.Start(); err != nil {
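Reviewer note: the fs.go hunks split throttling from measurement: the exported GetDirDiskUsage/GetDirInodeUsage run du/find unthrottled, while the RealFsInfo methods claim a token first and delegate. A minimal sketch of that claim/release pattern, assuming a buffered channel as the semaphore (cadvisor's actual token implementation and limit may differ):

package main

import (
	"fmt"
	"sync"
)

// tokens bounds how many expensive du/find invocations run concurrently.
// Capacity 2 is illustrative; cadvisor's limit may differ.
var tokens = make(chan struct{}, 2)

func claimToken()   { tokens <- struct{}{} }
func releaseToken() { <-tokens }

// scanDir stands in for a wrapper like RealFsInfo.GetDirDiskUsage:
// throttle first, then delegate to the exported, unthrottled function.
func scanDir(dir string) {
	claimToken()
	defer releaseToken()
	fmt.Println("scanning", dir) // the exported GetDirDiskUsage would run here
}

func main() {
	var wg sync.WaitGroup
	for _, d := range []string{"/var/a", "/var/b", "/var/c"} {
		wg.Add(1)
		go func(dir string) {
			defer wg.Done()
			scanDir(dir)
		}(d)
	}
	wg.Wait()
}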
vendor/github.com/google/cadvisor/http/BUILD (generated, vendored): 1 line changed

@@ -23,6 +23,7 @@ go_library(
         "//vendor/github.com/google/cadvisor/pages/static:go_default_library",
         "//vendor/github.com/google/cadvisor/validate:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
+        "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/http/handlers.go (generated, vendored): 19 lines changed

@@ -17,6 +17,7 @@ package http
 import (
 	"fmt"
 	"net/http"
+	"os"

 	"github.com/google/cadvisor/api"
 	"github.com/google/cadvisor/healthz"
@@ -30,6 +31,7 @@ import (
 	auth "github.com/abbot/go-http-auth"
 	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
 )

 func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAuthFile, httpAuthRealm, httpDigestFile, httpDigestRealm string) error {
@@ -54,7 +56,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut
 	// Redirect / to containers page.
 	mux.Handle("/", http.RedirectHandler(pages.ContainersPage, http.StatusTemporaryRedirect))

-	var authenticated bool = false
+	var authenticated bool

 	// Setup the authenticator object
 	if httpAuthFile != "" {
@@ -89,13 +91,16 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut
 		return nil
 	}

-// RegisterPrometheusHandler creates a new PrometheusCollector, registers it
-// on the global registry and configures the provided HTTP mux to handle the
-// given Prometheus endpoint.
+// RegisterPrometheusHandler creates a new PrometheusCollector and configures
+// the provided HTTP mux to handle the given Prometheus endpoint.
 func RegisterPrometheusHandler(mux httpmux.Mux, containerManager manager.Manager, prometheusEndpoint string, f metrics.ContainerLabelsFunc) {
-	collector := metrics.NewPrometheusCollector(containerManager, f)
-	prometheus.MustRegister(collector)
-	mux.Handle(prometheusEndpoint, prometheus.Handler())
+	r := prometheus.NewRegistry()
+	r.MustRegister(
+		metrics.NewPrometheusCollector(containerManager, f),
+		prometheus.NewGoCollector(),
+		prometheus.NewProcessCollector(os.Getpid(), ""),
+	)
+	mux.Handle(prometheusEndpoint, promhttp.HandlerFor(r, promhttp.HandlerOpts{}))
 }

 func staticHandlerNoAuth(w http.ResponseWriter, r *http.Request) {
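Reviewer note: RegisterPrometheusHandler now registers its collectors on a private registry instead of client_golang's global one, so the endpoint exports exactly what cAdvisor registered. A minimal sketch of the same dedicated-registry pattern, using the client_golang API as vendored here (the /metrics path and port are illustrative):

package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A private registry instead of prometheus.MustRegister's global one.
	r := prometheus.NewRegistry()
	r.MustRegister(
		prometheus.NewGoCollector(),                     // Go runtime metrics
		prometheus.NewProcessCollector(os.Getpid(), ""), // process metrics
	)
	// HandlerFor serves only what was registered on r.
	http.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}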
vendor/github.com/google/cadvisor/info/v1/container.go (generated, vendored): 25 lines changed

@@ -307,9 +307,10 @@ type CpuStats struct {
 }

 type PerDiskStats struct {
-	Major uint64            `json:"major"`
-	Minor uint64            `json:"minor"`
-	Stats map[string]uint64 `json:"stats"`
+	Device string            `json:"-"`
+	Major  uint64            `json:"major"`
+	Minor  uint64            `json:"minor"`
+	Stats  map[string]uint64 `json:"stats"`
 }

 type DiskIoStats struct {
@@ -386,6 +387,10 @@ type NetworkStats struct {
 	Tcp TcpStat `json:"tcp"`
 	// TCP6 connection stats (Established, Listen...)
 	Tcp6 TcpStat `json:"tcp6"`
+	// UDP connection stats
+	Udp UdpStat `json:"udp"`
+	// UDP6 connection stats
+	Udp6 UdpStat `json:"udp6"`
 }

 type TcpStat struct {
@@ -413,6 +418,20 @@ type TcpStat struct {
 	Closing uint64
 }

+type UdpStat struct {
+	// Count of UDP sockets in state "Listen"
+	Listen uint64
+
+	// Count of UDP packets dropped by the IP stack
+	Dropped uint64
+
+	// Count of packets Queued for Receive
+	RxQueued uint64
+
+	// Count of packets Queued for Transmit
+	TxQueued uint64
+}
+
 type FsStats struct {
 	// The block device name associated with the filesystem.
 	Device string `json:"device,omitempty"`
vendor/github.com/google/cadvisor/info/v1/machine.go (generated, vendored): 4 lines changed

@@ -17,6 +17,10 @@ package v1
 type FsInfo struct {
 	// Block device associated with the filesystem.
 	Device string `json:"device"`
+	// DeviceMajor is the major identifier of the device, used for correlation with blkio stats
+	DeviceMajor uint64 `json:"-"`
+	// DeviceMinor is the minor identifier of the device, used for correlation with blkio stats
+	DeviceMinor uint64 `json:"-"`

 	// Total number of bytes available on the filesystem.
 	Capacity uint64 `json:"capacity"`
vendor/github.com/google/cadvisor/info/v2/container.go (generated, vendored): 4 lines changed

@@ -269,6 +269,10 @@ type NetworkStats struct {
 	Tcp TcpStat `json:"tcp"`
 	// TCP6 connection stats (Established, Listen...)
 	Tcp6 TcpStat `json:"tcp6"`
+	// UDP connection stats
+	Udp v1.UdpStat `json:"udp"`
+	// UDP6 connection stats
+	Udp6 v1.UdpStat `json:"udp6"`
 }

 // Instantaneous CPU stats
vendor/github.com/google/cadvisor/info/v2/conversion.go (generated, vendored): 3 lines changed

@@ -133,7 +133,7 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []
 			}
 		} else if len(val.Filesystem) > 1 && containerName != "/" {
 			// Cannot handle multiple devices per container.
-			glog.V(2).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
+			glog.V(4).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
 		}
 	}
 	if spec.HasDiskIo {
@@ -259,6 +259,7 @@ func ContainerSpecFromV1(specV1 *v1.ContainerSpec, aliases []string, namespace s
 		HasCustomMetrics: specV1.HasCustomMetrics,
 		Image:            specV1.Image,
 		Labels:           specV1.Labels,
+		Envs:             specV1.Envs,
 	}
 	if specV1.HasCpu {
 		specV2.Cpu.Limit = specV1.Cpu.Limit
vendor/github.com/google/cadvisor/machine/info.go (generated, vendored): 2 lines changed

@@ -116,7 +116,7 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
 		if fs.Inodes != nil {
 			inodes = *fs.Inodes
 		}
-		machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: inodes, HasInodes: fs.Inodes != nil})
+		machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, DeviceMajor: uint64(fs.Major), DeviceMinor: uint64(fs.Minor), Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: inodes, HasInodes: fs.Inodes != nil})
 	}

 	return machineInfo, nil
vendor/github.com/google/cadvisor/manager/manager.go (generated, vendored): 17 lines changed

@@ -566,9 +566,24 @@ func (self *manager) getDockerContainer(containerName string) (*containerData, e
 		Namespace: docker.DockerNamespace,
 		Name:      containerName,
 	}]
+
+	// Look for container by short prefix name if no exact match found.
 	if !ok {
-		return nil, fmt.Errorf("unable to find Docker container %q", containerName)
+		for contName, c := range self.containers {
+			if contName.Namespace == docker.DockerNamespace && strings.HasPrefix(contName.Name, containerName) {
+				if cont == nil {
+					cont = c
+				} else {
+					return nil, fmt.Errorf("unable to find container. Container %q is not unique", containerName)
+				}
+			}
+		}
+
+		if cont == nil {
+			return nil, fmt.Errorf("unable to find Docker container %q", containerName)
+		}
 	}

 	return cont, nil
 }
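Reviewer note: getDockerContainer now falls back to a unique short-ID prefix match, like the docker CLI, and errors when the prefix is ambiguous. A standalone sketch of that resolution logic (the map contents and the string value type are illustrative, not cadvisor's):

package main

import (
	"fmt"
	"strings"
)

// findByPrefix resolves containerName against full container IDs, requiring
// the prefix to match exactly one entry, as in the manager change above.
func findByPrefix(containers map[string]string, containerName string) (string, error) {
	var found string
	matched := false
	for name, c := range containers {
		if strings.HasPrefix(name, containerName) {
			if matched {
				return "", fmt.Errorf("unable to find container. Container %q is not unique", containerName)
			}
			found, matched = c, true
		}
	}
	if !matched {
		return "", fmt.Errorf("unable to find Docker container %q", containerName)
	}
	return found, nil
}

func main() {
	containers := map[string]string{
		"4a2c8bdf61": "redis",
		"4a9f00ee12": "nginx",
		"b71648fa0e": "etcd",
	}
	fmt.Println(findByPrefix(containers, "b7")) // etcd <nil>
	fmt.Println(findByPrefix(containers, "4a")) // error: not unique
}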
vendor/github.com/google/cadvisor/metrics/prometheus.go (generated, vendored): 148 lines changed

@@ -45,6 +45,14 @@ type metricValue struct {

 type metricValues []metricValue

+// asFloat64 converts a uint64 into a float64.
+func asFloat64(v uint64) float64 { return float64(v) }
+
+// asNanosecondsToSeconds converts nanoseconds into a float64 representing seconds.
+func asNanosecondsToSeconds(v uint64) float64 {
+	return float64(v) / float64(time.Second)
+}
+
 // fsValues is a helper method for assembling per-filesystem stats.
 func fsValues(fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metricValues {
 	values := make(metricValues, 0, len(fsStats))
@@ -57,6 +65,24 @@ func fsValues(fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metri
 	return values
 }

+// ioValues is a helper method for assembling per-disk and per-filesystem stats.
+func ioValues(ioStats []info.PerDiskStats, ioType string, ioValueFn func(uint64) float64, fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metricValues {
+	values := make(metricValues, 0, len(ioStats)+len(fsStats))
+	for _, stat := range ioStats {
+		values = append(values, metricValue{
+			value:  ioValueFn(stat.Stats[ioType]),
+			labels: []string{stat.Device},
+		})
+	}
+	for _, stat := range fsStats {
+		values = append(values, metricValue{
+			value:  valueFn(&stat),
+			labels: []string{stat.Device},
+		})
+	}
+	return values
+}
+
 // containerMetric describes a multi-dimensional metric used for exposing a
 // certain type of container statistic.
 type containerMetric struct {
@@ -130,10 +156,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			getValues: func(s *info.ContainerStats) metricValues {
 				values := make(metricValues, 0, len(s.Cpu.Usage.PerCpu))
 				for i, value := range s.Cpu.Usage.PerCpu {
-					values = append(values, metricValue{
-						value:  float64(value) / float64(time.Second),
-						labels: []string{fmt.Sprintf("cpu%02d", i)},
-					})
+					if value > 0 {
+						values = append(values, metricValue{
+							value:  float64(value) / float64(time.Second),
+							labels: []string{fmt.Sprintf("cpu%02d", i)},
+						})
+					}
 				}
 				return values
 			},
@@ -268,15 +296,29 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 					return float64(fs.Usage)
 				})
 			},
+		}, {
+			name:        "container_fs_reads_bytes_total",
+			help:        "Cumulative count of bytes read",
+			valueType:   prometheus.CounterValue,
+			extraLabels: []string{"device"},
+			getValues: func(s *info.ContainerStats) metricValues {
+				return ioValues(
+					s.DiskIo.IoServiceBytes, "Read", asFloat64,
+					nil, nil,
+				)
+			},
 		}, {
 			name:        "container_fs_reads_total",
 			help:        "Cumulative count of reads completed",
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.ReadsCompleted)
-				})
+				return ioValues(
+					s.DiskIo.IoServiced, "Read", asFloat64,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.ReadsCompleted)
+					},
+				)
 			},
 		}, {
 			name: "container_fs_sector_reads_total",
@@ -284,9 +326,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.SectorsRead)
-				})
+				return ioValues(
+					s.DiskIo.Sectors, "Read", asFloat64,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.SectorsRead)
+					},
+				)
 			},
 		}, {
 			name: "container_fs_reads_merged_total",
@@ -294,9 +339,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.ReadsMerged)
-				})
+				return ioValues(
+					s.DiskIo.IoMerged, "Read", asFloat64,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.ReadsMerged)
+					},
+				)
 			},
 		}, {
 			name: "container_fs_read_seconds_total",
@@ -304,9 +352,23 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.ReadTime) / float64(time.Second)
-				})
+				return ioValues(
+					s.DiskIo.IoServiceTime, "Read", asNanosecondsToSeconds,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.ReadTime) / float64(time.Second)
+					},
+				)
 			},
+		}, {
+			name:        "container_fs_writes_bytes_total",
+			help:        "Cumulative count of bytes written",
+			valueType:   prometheus.CounterValue,
+			extraLabels: []string{"device"},
+			getValues: func(s *info.ContainerStats) metricValues {
+				return ioValues(
+					s.DiskIo.IoServiceBytes, "Write", asFloat64,
+					nil, nil,
+				)
+			},
 		}, {
 			name: "container_fs_writes_total",
@@ -314,9 +376,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.WritesCompleted)
-				})
+				return ioValues(
+					s.DiskIo.IoServiced, "Write", asFloat64,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.WritesCompleted)
+					},
+				)
 			},
 		}, {
 			name: "container_fs_sector_writes_total",
@@ -324,9 +389,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.SectorsWritten)
-				})
+				return ioValues(
+					s.DiskIo.Sectors, "Write", asFloat64,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.SectorsWritten)
+					},
+				)
 			},
 		}, {
 			name: "container_fs_writes_merged_total",
@@ -334,9 +402,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.WritesMerged)
-				})
+				return ioValues(
+					s.DiskIo.IoMerged, "Write", asFloat64,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.WritesMerged)
+					},
+				)
 			},
 		}, {
 			name: "container_fs_write_seconds_total",
@@ -344,9 +415,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.WriteTime) / float64(time.Second)
-				})
+				return ioValues(
+					s.DiskIo.IoServiceTime, "Write", asNanosecondsToSeconds,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.WriteTime) / float64(time.Second)
+					},
+				)
 			},
 		}, {
 			name: "container_fs_io_current",
@@ -354,9 +428,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.GaugeValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(fs.IoInProgress)
-				})
+				return ioValues(
+					s.DiskIo.IoQueued, "Total", asFloat64,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(fs.IoInProgress)
+					},
+				)
 			},
 		}, {
 			name: "container_fs_io_time_seconds_total",
@@ -364,9 +441,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			valueType:   prometheus.CounterValue,
 			extraLabels: []string{"device"},
 			getValues: func(s *info.ContainerStats) metricValues {
-				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
-					return float64(float64(fs.IoTime) / float64(time.Second))
-				})
+				return ioValues(
+					s.DiskIo.IoServiceTime, "Total", asNanosecondsToSeconds,
+					s.Filesystem, func(fs *info.FsStats) float64 {
+						return float64(float64(fs.IoTime) / float64(time.Second))
+					},
+				)
 			},
 		}, {
 			name: "container_fs_io_time_weighted_seconds_total",
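Reviewer note: the recurring rewrite above replaces fsValues with ioValues so each container_fs_* series emits per-device blkio counters (e.g. IoServiced["Read"]) alongside the existing per-filesystem values, all under the device label. A standalone sketch of that merge using local mirrors of the unexported shapes (illustrative, not cadvisor's types):

package main

import "fmt"

// Local mirrors of cadvisor's unexported shapes, for illustration only.
type perDiskStats struct {
	Device string
	Stats  map[string]uint64
}

type fsStats struct {
	Device         string
	ReadsCompleted uint64
}

type metricValue struct {
	value  float64
	labels []string
}

// ioValues mirrors the helper added above: per-disk counters first,
// then per-filesystem values, all labeled by device.
func ioValues(io []perDiskStats, ioType string, fn func(uint64) float64,
	fs []fsStats, fsFn func(*fsStats) float64) []metricValue {
	values := make([]metricValue, 0, len(io)+len(fs))
	for _, s := range io {
		values = append(values, metricValue{fn(s.Stats[ioType]), []string{s.Device}})
	}
	for i := range fs {
		values = append(values, metricValue{fsFn(&fs[i]), []string{fs[i].Device}})
	}
	return values
}

func main() {
	disk := []perDiskStats{{Device: "/dev/sda", Stats: map[string]uint64{"Read": 42}}}
	fs := []fsStats{{Device: "/dev/sdb1", ReadsCompleted: 7}}
	v := ioValues(disk, "Read", func(u uint64) float64 { return float64(u) },
		fs, func(f *fsStats) float64 { return float64(f.ReadsCompleted) })
	for _, m := range v {
		fmt.Println(m.labels[0], m.value) // /dev/sda 42, then /dev/sdb1 7
	}
}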