update cadvisor godeps
vendor/github.com/google/cadvisor/container/docker/docker.go (generated, vendored) | 5
@@ -37,7 +37,10 @@ func Status() (v1.DockerStatus, error) {
 	if err != nil {
 		return v1.DockerStatus{}, err
 	}
+	return StatusFromDockerInfo(dockerInfo), nil
+}
+
+func StatusFromDockerInfo(dockerInfo dockertypes.Info) v1.DockerStatus {
 	out := v1.DockerStatus{}
 	out.Version = VersionString()
 	out.APIVersion = APIVersionString()
@@ -53,7 +56,7 @@ func Status() (v1.DockerStatus, error) {
 	for _, v := range dockerInfo.DriverStatus {
 		out.DriverStatus[v[0]] = v[1]
 	}
-	return out, nil
+	return out
 }
 
 func Images() ([]v1.DockerImage, error) {
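Note: the docker.go change splits the conversion out of Status() so that a caller already holding a dockertypes.Info (as Register in factory.go does below) can build a v1.DockerStatus without a second round-trip to the daemon. A minimal sketch of the two call paths, assuming the vendored package resolves under its upstream import path and using only fields the diff itself sets:

package main

import (
	"fmt"

	docker "github.com/google/cadvisor/container/docker"
)

func main() {
	// One-step path: query the daemon and convert the result.
	status, err := docker.Status()
	if err != nil {
		fmt.Printf("docker unavailable: %v\n", err)
		return
	}
	fmt.Println("docker version:", status.Version)

	// Reuse path: a caller that already holds a dockertypes.Info value
	// (as Register in factory.go does) can convert it directly instead
	// of querying the daemon again:
	//   status = docker.StatusFromDockerInfo(info)
}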
vendor/github.com/google/cadvisor/container/docker/factory.go (generated, vendored) | 12
@@ -84,6 +84,7 @@ const (
 	devicemapperStorageDriver storageDriver = "devicemapper"
 	aufsStorageDriver         storageDriver = "aufs"
 	overlayStorageDriver      storageDriver = "overlay"
+	overlay2StorageDriver     storageDriver = "overlay2"
 	zfsStorageDriver          storageDriver = "zfs"
 )
 
@@ -107,6 +108,7 @@ type dockerFactory struct {
 
 	ignoreMetrics container.MetricSet
 
+	thinPoolName    string
 	thinPoolWatcher *devicemapper.ThinPoolWatcher
 
 	zfsWatcher *zfs.ZfsWatcher
@@ -136,6 +138,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
 		metadataEnvs,
 		self.dockerVersion,
 		self.ignoreMetrics,
+		self.thinPoolName,
 		self.thinPoolWatcher,
 		self.zfsWatcher,
 	)
@@ -323,12 +326,18 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
 	}
 
-	var thinPoolWatcher *devicemapper.ThinPoolWatcher
+	var (
+		thinPoolWatcher *devicemapper.ThinPoolWatcher
+		thinPoolName    string
+	)
 	if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver {
 		thinPoolWatcher, err = startThinPoolWatcher(dockerInfo)
 		if err != nil {
 			glog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
 		}
+
+		status := StatusFromDockerInfo(*dockerInfo)
+		thinPoolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
 	}
 
 	var zfsWatcher *zfs.ZfsWatcher
@@ -350,6 +359,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		storageDriver:   storageDriver(dockerInfo.Driver),
 		storageDir:      RootDir(),
 		ignoreMetrics:   ignoreMetrics,
+		thinPoolName:    thinPoolName,
 		thinPoolWatcher: thinPoolWatcher,
 		zfsWatcher:      zfsWatcher,
 	}
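Note: with this change the thin pool name is resolved once at Register time (via the new StatusFromDockerInfo) and threaded through dockerFactory into each handler, instead of every handler calling Status() itself. For illustration, a self-contained sketch of the DriverStatus flattening that makes the DriverStatusPoolName lookup a one-liner; the sample pairs and the literal "Pool Name" key are assumptions for this sketch, not values taken from the commit:

package main

import "fmt"

// driverStatusMap mirrors the flattening done in docker.go above: Docker
// reports DriverStatus as ordered key/value pairs, and cadvisor folds them
// into a map keyed by status name.
func driverStatusMap(pairs [][2]string) map[string]string {
	out := make(map[string]string, len(pairs))
	for _, v := range pairs {
		out[v[0]] = v[1]
	}
	return out
}

func main() {
	// Sample pairs such as a devicemapper daemon might report; the values
	// here are invented for illustration.
	pairs := [][2]string{
		{"Pool Name", "docker-253:0-1234-pool"},
		{"Data file", "/dev/loop0"},
	}
	status := driverStatusMap(pairs)
	// Upstream the key is referenced via dockerutil.DriverStatusPoolName;
	// the literal "Pool Name" string is an assumption here.
	fmt.Println("thin pool:", status["Pool Name"])
}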
vendor/github.com/google/cadvisor/container/docker/handler.go (generated, vendored) | 43
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"path"
+	"strconv"
 	"strings"
 	"time"
 
@@ -112,6 +113,9 @@ type dockerContainerHandler struct {
 
 	// zfs watcher
 	zfsWatcher *zfs.ZfsWatcher
+
+	// container restart count
+	restartCount int
 }
 
 var _ container.ContainerHandler = &dockerContainerHandler{}
@@ -146,6 +150,7 @@ func newDockerContainerHandler(
 	metadataEnvs []string,
 	dockerVersion []int,
 	ignoreMetrics container.MetricSet,
+	thinPoolName string,
 	thinPoolWatcher *devicemapper.ThinPoolWatcher,
 	zfsWatcher *zfs.ZfsWatcher,
 ) (container.ContainerHandler, error) {
@@ -180,18 +185,18 @@ func newDockerContainerHandler(
 		return nil, err
 	}
 
-	// Determine the rootfs storage dir OR the pool name to determine the device
+	// Determine the rootfs storage dir OR the pool name to determine the device.
+	// For devicemapper, we only need the thin pool name, and that is passed in to this call
 	var (
 		rootfsStorageDir string
-		poolName         string
 		zfsFilesystem    string
 		zfsParent        string
 	)
 	switch storageDriver {
 	case aufsStorageDriver:
 		rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
-	case overlayStorageDriver:
-		rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
+	case overlayStorageDriver, overlay2StorageDriver:
+		rootfsStorageDir = path.Join(storageDir, string(storageDriver), rwLayerID)
 	case zfsStorageDriver:
 		status, err := Status()
 		if err != nil {
@@ -199,13 +204,6 @@ func newDockerContainerHandler(
 		}
 		zfsParent = status.DriverStatus[dockerutil.DriverStatusParentDataset]
 		zfsFilesystem = path.Join(zfsParent, rwLayerID)
-	case devicemapperStorageDriver:
-		status, err := Status()
-		if err != nil {
-			return nil, fmt.Errorf("unable to determine docker status: %v", err)
-		}
-
-		poolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
 	}
 
 	// TODO: extract object mother method
@@ -219,7 +217,7 @@ func newDockerContainerHandler(
 		storageDriver:    storageDriver,
 		fsInfo:           fsInfo,
 		rootFs:           rootFs,
-		poolName:         poolName,
+		poolName:         thinPoolName,
 		zfsFilesystem:    zfsFilesystem,
 		rootfsStorageDir: rootfsStorageDir,
 		envs:             make(map[string]string),
@@ -248,6 +246,7 @@ func newDockerContainerHandler(
 	handler.image = ctnr.Config.Image
 	handler.networkMode = ctnr.HostConfig.NetworkMode
 	handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]
+	handler.restartCount = ctnr.RestartCount
 
 	// Obtain the IP address for the contianer.
 	// If the NetworkMode starts with 'container:' then we need to use the IP address of the container specified.
@@ -383,6 +382,10 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 	spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)
 
 	spec.Labels = self.labels
+	// Only adds restartcount label if it's greater than 0
+	if self.restartCount > 0 {
+		spec.Labels["restartcount"] = strconv.Itoa(self.restartCount)
+	}
 	spec.Envs = self.envs
 	spec.Image = self.image
 
@@ -390,6 +393,15 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 }
 
 func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
+	mi, err := self.machineInfoFactory.GetMachineInfo()
+	if err != nil {
+		return err
+	}
+
+	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
+	}
+
 	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
@@ -399,7 +411,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 		// Device has to be the pool name to correlate with the device name as
 		// set in the machine info filesystems.
 		device = self.poolName
-	case aufsStorageDriver, overlayStorageDriver:
+	case aufsStorageDriver, overlayStorageDriver, overlay2StorageDriver:
 		deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
 		if err != nil {
 			return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
@@ -411,11 +423,6 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return nil
 	}
 
-	mi, err := self.machineInfoFactory.GetMachineInfo()
-	if err != nil {
-		return err
-	}
-
 	var (
 		limit  uint64
 		fsType string
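Note: the handler.go changes also surface a container's restart count as a spec label, but only when it is non-zero. A standalone sketch of that guard (addRestartCountLabel is a hypothetical helper for illustration; upstream the logic lives inline in GetSpec):

package main

import (
	"fmt"
	"strconv"
)

// addRestartCountLabel mirrors the GetSpec change above: the "restartcount"
// label is set only when the count is greater than zero, so containers that
// never restarted carry no extra label.
func addRestartCountLabel(labels map[string]string, restartCount int) {
	if restartCount > 0 {
		labels["restartcount"] = strconv.Itoa(restartCount)
	}
}

func main() {
	labels := map[string]string{"app": "demo"} // invented sample labels
	addRestartCountLabel(labels, 3)
	fmt.Println(labels) // map[app:demo restartcount:3]
}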