Updated cadvisor to the latest version; updated the AWS dependency to the same version as cadvisor.

vendor/github.com/google/cadvisor/api/versions.go (generated, vendored): 2 changed lines
@@ -509,7 +509,7 @@ func (self *version2_1) HandleRequest(requestType string, request []string, m ma
 		}
 		contStats[name] = v2.ContainerInfo{
 			Spec:  v2.ContainerSpecFromV1(&cont.Spec, cont.Aliases, cont.Namespace),
-			Stats: v2.ContainerStatsFromV1(&cont.Spec, cont.Stats),
+			Stats: v2.ContainerStatsFromV1(name, &cont.Spec, cont.Stats),
 		}
 	}
 	return writeResult(contStats, w)

vendor/github.com/google/cadvisor/container/docker/factory.go (generated, vendored): 16 changed lines
@@ -141,17 +141,21 @@ func ContainerNameToDockerId(name string) string {
 	return id
 }
 
 // isContainerName returns true if the cgroup with associated name
 // corresponds to a docker container.
 func isContainerName(name string) bool {
+	// always ignore .mount cgroup even if associated with docker and delegate to systemd
+	if strings.HasSuffix(name, ".mount") {
+		return false
+	}
 	return dockerCgroupRegexp.MatchString(path.Base(name))
 }
 
 // Docker handles all containers under /docker
 func (self *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
-	// docker factory accepts all containers it can handle.
-	canAccept := true
-
 	// if the container is not associated with docker, we can't handle it or accept it.
 	if !isContainerName(name) {
-		return false, canAccept, fmt.Errorf("invalid container name")
+		return false, false, nil
 	}
 
 	// Check if the container is known to docker and it is active.
@@ -160,10 +164,10 @@ func (self *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
 	// We assume that if Inspect fails then the container is not known to docker.
 	ctnr, err := self.client.ContainerInspect(context.Background(), id)
 	if err != nil || !ctnr.State.Running {
-		return false, canAccept, fmt.Errorf("error inspecting container: %v", err)
+		return false, true, fmt.Errorf("error inspecting container: %v", err)
 	}
 
-	return true, canAccept, nil
+	return true, true, nil
 }
 
 func (self *dockerFactory) DebugInfo() map[string][]string {
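
The factory.go change above is the behavioral core of this cadvisor bump: .mount cgroups are now delegated to systemd instead of being claimed by the docker factory, and an unrecognized name is no longer reported as an error. A small self-contained sketch of the new check follows; the regexp and sample paths here are illustrative assumptions, not cadvisor's exact dockerCgroupRegexp.

package main

import (
	"fmt"
	"path"
	"regexp"
	"strings"
)

// Stand-in for cadvisor's dockerCgroupRegexp; the real pattern matches full
// 64-character container IDs, this shorter one is only for the demo.
var dockerCgroupRegexp = regexp.MustCompile(`[a-f0-9]{12,}`)

// isContainerName mirrors the patched check: .mount cgroups are left to
// systemd, everything else is matched against the docker cgroup pattern.
func isContainerName(name string) bool {
	if strings.HasSuffix(name, ".mount") {
		return false
	}
	return dockerCgroupRegexp.MatchString(path.Base(name))
}

func main() {
	fmt.Println(isContainerName("/docker/0123456789abcdef"))       // true: looks like a container cgroup
	fmt.Println(isContainerName("/docker/0123456789abcdef.mount")) // false: delegated to systemd
}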

vendor/github.com/google/cadvisor/fs/fs.go (generated, vendored): 46 changed lines
@@ -346,7 +346,7 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
 	return filesystems, nil
 }
 
-var partitionRegex = regexp.MustCompile(`^(?:(?:s|xv)d[a-z]+\d*|dm-\d+)$`)
+var partitionRegex = regexp.MustCompile(`^(?:(?:s|v|xv)d[a-z]+\d*|dm-\d+)$`)
 
 func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 	diskStatsMap := make(map[string]DiskStats)
@@ -474,42 +474,24 @@ func (self *RealFsInfo) GetDirInodeUsage(dir string, timeout time.Duration) (uin
 	if dir == "" {
 		return 0, fmt.Errorf("invalid directory")
 	}
-	var stdout, stdwcerr, stdfinderr bytes.Buffer
-	var err error
+	var counter byteCounter
+	var stderr bytes.Buffer
 	claimToken()
 	defer releaseToken()
 	findCmd := exec.Command("find", dir, "-xdev", "-printf", ".")
-	wcCmd := exec.Command("wc", "-c")
-	if wcCmd.Stdin, err = findCmd.StdoutPipe(); err != nil {
-		return 0, fmt.Errorf("failed to setup stdout for cmd %v - %v", findCmd.Args, err)
-	}
-	wcCmd.Stdout, wcCmd.Stderr, findCmd.Stderr = &stdout, &stdwcerr, &stdfinderr
-	if err = findCmd.Start(); err != nil {
-		return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stdfinderr.String())
-	}
-
-	if err = wcCmd.Start(); err != nil {
-		return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr %v", wcCmd.Args, err, stdwcerr.String())
+	findCmd.Stdout, findCmd.Stderr = &counter, &stderr
+	if err := findCmd.Start(); err != nil {
+		return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
 	}
 	timer := time.AfterFunc(timeout, func() {
-		glog.Infof("killing cmd %v, and cmd %v due to timeout(%s)", findCmd.Args, wcCmd.Args, timeout.String())
-		wcCmd.Process.Kill()
+		glog.Infof("killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String())
 		findCmd.Process.Kill()
 	})
-	err = findCmd.Wait()
-	if err != nil {
-		return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", findCmd.Args, stdfinderr.String(), err)
-	}
-	err = wcCmd.Wait()
-	if err != nil {
-		return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", wcCmd.Args, stdwcerr.String(), err)
+	if err := findCmd.Wait(); err != nil {
+		return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", findCmd.Args, stderr.String(), err)
 	}
 	timer.Stop()
-	inodeUsage, err := strconv.ParseUint(strings.TrimSpace(stdout.String()), 10, 64)
-	if err != nil {
-		return 0, fmt.Errorf("cannot parse cmds: %v, %v output %s - %s", findCmd.Args, wcCmd.Args, stdout.String(), err)
-	}
-	return inodeUsage, nil
+	return counter.bytesWritten, nil
 }
 
 func getVfsStats(path string) (total uint64, free uint64, avail uint64, inodes uint64, inodesFree uint64, err error) {
@@ -621,3 +603,11 @@ func getZfstats(poolName string) (uint64, uint64, uint64, error) {
 
 	return total, dataset.Avail, dataset.Avail, nil
 }
+
+// Simple io.Writer implementation that counts how many bytes were written.
+type byteCounter struct{ bytesWritten uint64 }
+
+func (b *byteCounter) Write(p []byte) (int, error) {
+	b.bytesWritten += uint64(len(p))
+	return len(p), nil
+}
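
The fs.go change replaces the find | wc -c pipeline in GetDirInodeUsage with an in-process byte counter: find prints one "." per entry and the byteCounter io.Writer simply counts the bytes it receives, so only a single child process has to be started, timed out, and reaped. A trimmed sketch of the same pattern, assuming GNU find is available; countInodes is a hypothetical helper, not cadvisor's API, and the timeout and stderr handling from GetDirInodeUsage are omitted.

package main

import (
	"fmt"
	"os/exec"
)

// byteCounter is the io.Writer pattern the patch introduces: it discards the
// data and only remembers how many bytes passed through.
type byteCounter struct{ bytesWritten uint64 }

func (b *byteCounter) Write(p []byte) (int, error) {
	b.bytesWritten += uint64(len(p))
	return len(p), nil
}

// countInodes prints one "." per directory entry via find and counts the dots
// in-process, replacing the old "find | wc -c" pipeline.
func countInodes(dir string) (uint64, error) {
	var counter byteCounter
	cmd := exec.Command("find", dir, "-xdev", "-printf", ".")
	cmd.Stdout = &counter
	if err := cmd.Run(); err != nil {
		return 0, err
	}
	return counter.bytesWritten, nil
}

func main() {
	n, err := countInodes("/tmp")
	fmt.Println(n, err)
}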

vendor/github.com/google/cadvisor/info/v2/conversion.go (generated, vendored): 8 changed lines
@@ -96,7 +96,7 @@ func MachineStatsFromV1(cont *v1.ContainerInfo) []MachineStats {
 	return stats
 }
 
-func ContainerStatsFromV1(spec *v1.ContainerSpec, stats []*v1.ContainerStats) []*ContainerStats {
+func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []*v1.ContainerStats) []*ContainerStats {
 	newStats := make([]*ContainerStats, 0, len(stats))
 	var last *v1.ContainerStats
 	for _, val := range stats {
@@ -119,6 +119,8 @@ func ContainerStatsFromV1(spec *v1.ContainerSpec, stats []*v1.ContainerStats) []
 		if spec.HasNetwork {
 			// TODO: Handle TcpStats
 			stat.Network = &NetworkStats{
+				Tcp:        TcpStat(val.Network.Tcp),
+				Tcp6:       TcpStat(val.Network.Tcp6),
 				Interfaces: val.Network.Interfaces,
 			}
 		}
@@ -129,9 +131,9 @@ func ContainerStatsFromV1(spec *v1.ContainerSpec, stats []*v1.ContainerStats) []
 				BaseUsageBytes: &val.Filesystem[0].BaseUsage,
 				InodeUsage:     &val.Filesystem[0].Inodes,
 			}
-		} else if len(val.Filesystem) > 1 {
+		} else if len(val.Filesystem) > 1 && containerName != "/" {
 			// Cannot handle multiple devices per container.
-			glog.V(2).Infof("failed to handle multiple devices for container. Skipping Filesystem stats")
+			glog.V(2).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
 		}
 	}
 	if spec.HasDiskIo {

vendor/github.com/google/cadvisor/manager/manager.go (generated, vendored): 2 changed lines
@@ -464,7 +464,7 @@ func (self *manager) GetContainerInfoV2(containerName string, options v2.Request
 			continue
 		}
 
-		result.Stats = v2.ContainerStatsFromV1(&cinfo.Spec, stats)
+		result.Stats = v2.ContainerStatsFromV1(containerName, &cinfo.Spec, stats)
 		infos[name] = result
 	}
 

vendor/github.com/google/cadvisor/pages/static/assets.go (generated, vendored): 2 changed lines
@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2017 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

vendor/github.com/google/cadvisor/pages/templates.go (generated, vendored): 2 changed lines
@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2017 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

vendor/github.com/google/cadvisor/utils/tail/tail.go (generated, vendored): 28 changed lines
@@ -45,6 +45,16 @@ const (
 
 // NewTail starts opens the given file and watches it for deletion/rotation
 func NewTail(filename string) (*Tail, error) {
+	t, err := newTail(filename)
+	if err != nil {
+		return nil, err
+	}
+	go t.watchLoop()
+	return t, nil
+}
+
+// newTail creates a Tail object.
+func newTail(filename string) (*Tail, error) {
 	t := &Tail{
 		filename: filename,
 	}
@@ -54,7 +64,9 @@ func NewTail(filename string) (*Tail, error) {
 	if err != nil {
 		return nil, fmt.Errorf("inotify init failed on %s: %v", t.filename, err)
 	}
-	go t.watchLoop()
+	// Initialize readerErr as io.EOF, so that the reader can work properly
+	// during initialization.
+	t.readerErr = io.EOF
 	return t, nil
 }
 
@@ -62,25 +74,26 @@ func NewTail(filename string) (*Tail, error) {
 func (t *Tail) Read(p []byte) (int, error) {
 	t.readerLock.RLock()
 	defer t.readerLock.RUnlock()
-	if t.reader == nil || t.readerErr != nil {
+	if t.readerErr != nil {
 		return 0, t.readerErr
 	}
 	return t.reader.Read(p)
 }
 
-var _ io.Reader = &Tail{}
+var _ io.ReadCloser = &Tail{}
 
 // Close stops watching and closes the file
-func (t *Tail) Close() {
+func (t *Tail) Close() error {
 	close(t.stop)
+	return nil
 }
 
 func (t *Tail) attemptOpen() error {
 	t.readerLock.Lock()
 	defer t.readerLock.Unlock()
 	t.reader = nil
 	t.readerErr = nil
 	attempt := 0
+	var lastErr error
 	for interval := defaultRetryInterval; ; interval *= 2 {
 		attempt++
 		glog.V(4).Infof("Opening %s (attempt %d)", t.filename, attempt)
@@ -92,6 +105,9 @@ func (t *Tail) attemptOpen() error {
 			t.reader = bufio.NewReader(t.file)
 			return nil
 		}
+		lastErr = err
+		glog.V(4).Infof("open log file %s error: %v", t.filename, err)
+
 		if interval >= maxRetryInterval {
 			break
 		}
@@ -102,7 +118,7 @@ func (t *Tail) attemptOpen() error {
 			return fmt.Errorf("watch was cancelled")
 		}
 	}
-	err := fmt.Errorf("can't open log file %s", t.filename)
+	err := fmt.Errorf("can't open log file %s: %v", t.filename, lastErr)
 	t.readerErr = err
 	return err
 }
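
The tail.go changes split construction (newTail) from watching (NewTail), pre-set readerErr to io.EOF so Read is safe before the file has been opened, and change Close() to return an error so that *Tail satisfies io.ReadCloser (the var _ io.ReadCloser = &Tail{} assertion above). A minimal sketch of what the new signature enables, assuming the package imports as tail and builds as shown in the diff; the log path is illustrative.

package main

import (
	"fmt"
	"io"

	"github.com/google/cadvisor/utils/tail"
)

// closeQuietly works on any io.ReadCloser; before this change *tail.Tail could
// not be passed here because Close() had no error return.
func closeQuietly(rc io.ReadCloser) {
	if err := rc.Close(); err != nil {
		fmt.Println("close failed:", err)
	}
}

func main() {
	t, err := tail.NewTail("/var/log/example.log") // illustrative path
	if err != nil {
		fmt.Println("NewTail:", err)
		return
	}
	closeQuietly(t) // compiles only because *Tail now satisfies io.ReadCloser
}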