Updating dependency github.com/google/cadvisor to version 6a8d614

Signed-off-by: Davanum Srinivas <davanum@gmail.com>
Davanum Srinivas
2020-05-14 17:29:52 -04:00
parent 449810c785
commit 082578c22f
109 changed files with 3417 additions and 1312 deletions

@@ -37,6 +37,8 @@ import (
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/info/v2"
"github.com/google/cadvisor/machine"
"github.com/google/cadvisor/nvm"
"github.com/google/cadvisor/perf"
"github.com/google/cadvisor/stats"
"github.com/google/cadvisor/utils/oomparser"
"github.com/google/cadvisor/utils/sysfs"
@@ -44,7 +46,7 @@ import (
"github.com/google/cadvisor/watcher"
"github.com/opencontainers/runc/libcontainer/cgroups"
"k8s.io/klog"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
@@ -121,7 +123,7 @@ type Manager interface {
// Get past events that have been detected and that fit the request.
GetPastEvents(request *events.Request) ([]*info.Event, error)
CloseEventChannel(watch_id int)
CloseEventChannel(watchID int)
// Get status information about docker.
DockerInfo() (info.DockerStatus, error)
@@ -133,8 +135,14 @@ type Manager interface {
DebugInfo() map[string][]string
}
// Housekeeping configuration for the manager
type HouskeepingConfig = struct {
Interval *time.Duration
AllowDynamic *bool
}
// New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, includedMetricsSet container.MetricSet, collectorHttpClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string) (Manager, error) {
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig HouskeepingConfig, includedMetricsSet container.MetricSet, collectorHTTPClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string, perfEventsFile string) (Manager, error) {
if memoryCache == nil {
return nil, fmt.Errorf("manager requires memory storage")
}
@@ -176,12 +184,12 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
cadvisorContainer: selfContainer,
inHostNamespace: inHostNamespace,
startupTime: time.Now(),
maxHousekeepingInterval: maxHousekeepingInterval,
allowDynamicHousekeeping: allowDynamicHousekeeping,
maxHousekeepingInterval: *houskeepingConfig.Interval,
allowDynamicHousekeeping: *houskeepingConfig.AllowDynamic,
includedMetrics: includedMetricsSet,
containerWatchers: []watcher.ContainerWatcher{},
eventsChannel: eventsChannel,
collectorHttpClient: collectorHttpClient,
collectorHTTPClient: collectorHTTPClient,
nvidiaManager: accelerators.NewNvidiaManager(),
rawContainerCgroupPathPrefixWhiteList: rawContainerCgroupPathPrefixWhiteList,
}
@@ -193,6 +201,11 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
newManager.machineInfo = *machineInfo
klog.V(1).Infof("Machine: %+v", newManager.machineInfo)
newManager.perfManager, err = perf.NewManager(perfEventsFile, machineInfo.NumCores)
if err != nil {
return nil, err
}
versionInfo, err := getVersionInfo()
if err != nil {
return nil, err
@@ -230,17 +243,18 @@ type manager struct {
includedMetrics container.MetricSet
containerWatchers []watcher.ContainerWatcher
eventsChannel chan watcher.ContainerEvent
collectorHttpClient *http.Client
collectorHTTPClient *http.Client
nvidiaManager stats.Manager
perfManager stats.Manager
// List of raw container cgroup path prefix whitelist.
rawContainerCgroupPathPrefixWhiteList []string
}
// Start the container manager.
func (self *manager) Start() error {
self.containerWatchers = container.InitializePlugins(self, self.fsInfo, self.includedMetrics)
func (m *manager) Start() error {
m.containerWatchers = container.InitializePlugins(m, m.fsInfo, m.includedMetrics)
err := raw.Register(self, self.fsInfo, self.includedMetrics, self.rawContainerCgroupPathPrefixWhiteList)
err := raw.Register(m, m.fsInfo, m.includedMetrics, m.rawContainerCgroupPathPrefixWhiteList)
if err != nil {
klog.Errorf("Registration of the raw container factory failed: %v", err)
}
@@ -249,10 +263,10 @@ func (self *manager) Start() error {
if err != nil {
return err
}
self.containerWatchers = append(self.containerWatchers, rawWatcher)
m.containerWatchers = append(m.containerWatchers, rawWatcher)
// Watch for OOMs.
err = self.watchForNewOoms()
err = m.watchForNewOoms()
if err != nil {
klog.Warningf("Could not configure a source for OOM detection, disabling OOM events: %v", err)
}
@@ -262,16 +276,13 @@ func (self *manager) Start() error {
return nil
}
// Setup collection of nvidia GPU metrics if any of them are attached to the machine.
self.nvidiaManager.Setup()
// Create root and then recover all containers.
err = self.createContainer("/", watcher.Raw)
err = m.createContainer("/", watcher.Raw)
if err != nil {
return err
}
klog.V(2).Infof("Starting recovery of all containers")
err = self.detectSubcontainers("/")
err = m.detectSubcontainers("/")
if err != nil {
return err
}
@@ -279,54 +290,63 @@ func (self *manager) Start() error {
// Watch for new container.
quitWatcher := make(chan error)
err = self.watchForNewContainers(quitWatcher)
err = m.watchForNewContainers(quitWatcher)
if err != nil {
return err
}
self.quitChannels = append(self.quitChannels, quitWatcher)
m.quitChannels = append(m.quitChannels, quitWatcher)
// Look for new containers in the main housekeeping thread.
quitGlobalHousekeeping := make(chan error)
self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
go self.globalHousekeeping(quitGlobalHousekeeping)
m.quitChannels = append(m.quitChannels, quitGlobalHousekeeping)
go m.globalHousekeeping(quitGlobalHousekeeping)
quitUpdateMachineInfo := make(chan error)
self.quitChannels = append(self.quitChannels, quitUpdateMachineInfo)
go self.updateMachineInfo(quitUpdateMachineInfo)
m.quitChannels = append(m.quitChannels, quitUpdateMachineInfo)
go m.updateMachineInfo(quitUpdateMachineInfo)
return nil
}
func (self *manager) Stop() error {
defer self.nvidiaManager.Destroy()
func (m *manager) Stop() error {
defer m.nvidiaManager.Destroy()
defer m.destroyPerfCollectors()
// Stop and wait on all quit channels.
for i, c := range self.quitChannels {
for i, c := range m.quitChannels {
// Send the exit signal and wait on the thread to exit (by closing the channel).
c <- nil
err := <-c
if err != nil {
// Remove the channels that quit successfully.
self.quitChannels = self.quitChannels[i:]
m.quitChannels = m.quitChannels[i:]
return err
}
}
self.quitChannels = make([]chan error, 0, 2)
m.quitChannels = make([]chan error, 0, 2)
nvm.Finalize()
perf.Finalize()
return nil
}
func (self *manager) updateMachineInfo(quit chan error) {
func (m *manager) destroyPerfCollectors() {
for _, container := range m.containers {
container.perfCollector.Destroy()
}
}
func (m *manager) updateMachineInfo(quit chan error) {
ticker := time.NewTicker(*updateMachineInfoInterval)
for {
select {
case <-ticker.C:
info, err := machine.Info(self.sysFs, self.fsInfo, self.inHostNamespace)
info, err := machine.Info(m.sysFs, m.fsInfo, m.inHostNamespace)
if err != nil {
klog.Errorf("Could not get machine info: %v", err)
break
}
self.machineMu.Lock()
self.machineInfo = *info
self.machineMu.Unlock()
m.machineMu.Lock()
m.machineInfo = *info
m.machineMu.Unlock()
klog.V(5).Infof("Update machine info: %+v", *info)
case <-quit:
ticker.Stop()
@@ -336,21 +356,21 @@ func (self *manager) updateMachineInfo(quit chan error) {
}
}
func (self *manager) globalHousekeeping(quit chan error) {
func (m *manager) globalHousekeeping(quit chan error) {
// Long housekeeping is either 100ms or half of the housekeeping interval.
longHousekeeping := 100 * time.Millisecond
if *globalHousekeepingInterval/2 < longHousekeeping {
longHousekeeping = *globalHousekeepingInterval / 2
}
ticker := time.Tick(*globalHousekeepingInterval)
ticker := time.NewTicker(*globalHousekeepingInterval)
for {
select {
case t := <-ticker:
case t := <-ticker.C:
start := time.Now()
// Check for new containers.
err := self.detectSubcontainers("/")
err := m.detectSubcontainers("/")
if err != nil {
klog.Errorf("Failed to detect containers: %s", err)
}
@@ -369,15 +389,15 @@ func (self *manager) globalHousekeeping(quit chan error) {
}
}
func (self *manager) getContainerData(containerName string) (*containerData, error) {
func (m *manager) getContainerData(containerName string) (*containerData, error) {
var cont *containerData
var ok bool
func() {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
m.containersLock.RLock()
defer m.containersLock.RUnlock()
// Ensure we have the container.
cont, ok = self.containers[namespacedContainerName{
cont, ok = m.containers[namespacedContainerName{
Name: containerName,
}]
}()
@@ -387,8 +407,8 @@ func (self *manager) getContainerData(containerName string) (*containerData, err
return cont, nil
}
func (self *manager) GetDerivedStats(containerName string, options v2.RequestOptions) (map[string]v2.DerivedStats, error) {
conts, err := self.getRequestedContainers(containerName, options)
func (m *manager) GetDerivedStats(containerName string, options v2.RequestOptions) (map[string]v2.DerivedStats, error) {
conts, err := m.getRequestedContainers(containerName, options)
if err != nil {
return nil, err
}
@@ -404,8 +424,8 @@ func (self *manager) GetDerivedStats(containerName string, options v2.RequestOpt
return stats, errs.OrNil()
}
func (self *manager) GetContainerSpec(containerName string, options v2.RequestOptions) (map[string]v2.ContainerSpec, error) {
conts, err := self.getRequestedContainers(containerName, options)
func (m *manager) GetContainerSpec(containerName string, options v2.RequestOptions) (map[string]v2.ContainerSpec, error) {
conts, err := m.getRequestedContainers(containerName, options)
if err != nil {
return nil, err
}
@@ -416,43 +436,43 @@ func (self *manager) GetContainerSpec(containerName string, options v2.RequestOp
if err != nil {
errs.append(name, "GetInfo", err)
}
spec := self.getV2Spec(cinfo)
spec := m.getV2Spec(cinfo)
specs[name] = spec
}
return specs, errs.OrNil()
}
// Get V2 container spec from v1 container info.
func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
spec := self.getAdjustedSpec(cinfo)
func (m *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
spec := m.getAdjustedSpec(cinfo)
return v2.ContainerSpecFromV1(&spec, cinfo.Aliases, cinfo.Namespace)
}
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
func (m *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
spec := cinfo.Spec
// Set default value to an actual value
if spec.HasMemory {
// Memory.Limit is 0 means there's no limit
if spec.Memory.Limit == 0 {
self.machineMu.RLock()
spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
self.machineMu.RUnlock()
m.machineMu.RLock()
spec.Memory.Limit = uint64(m.machineInfo.MemoryCapacity)
m.machineMu.RUnlock()
}
}
return spec
}
func (self *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
cont, err := self.getContainerData(containerName)
func (m *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
cont, err := m.getContainerData(containerName)
if err != nil {
return nil, err
}
return self.containerDataToContainerInfo(cont, query)
return m.containerDataToContainerInfo(cont, query)
}
func (self *manager) GetContainerInfoV2(containerName string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error) {
containers, err := self.getRequestedContainers(containerName, options)
func (m *manager) GetContainerInfoV2(containerName string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error) {
containers, err := m.getRequestedContainers(containerName, options)
if err != nil {
return nil, err
}
@@ -469,9 +489,9 @@ func (self *manager) GetContainerInfoV2(containerName string, options v2.Request
infos[name] = result
continue
}
result.Spec = self.getV2Spec(cinfo)
result.Spec = m.getV2Spec(cinfo)
stats, err := self.memoryCache.RecentStats(name, nilTime, nilTime, options.Count)
stats, err := m.memoryCache.RecentStats(name, nilTime, nilTime, options.Count)
if err != nil {
errs.append(name, "RecentStats", err)
infos[name] = result
@@ -485,14 +505,14 @@ func (self *manager) GetContainerInfoV2(containerName string, options v2.Request
return infos, errs.OrNil()
}
func (self *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
func (m *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
// Get the info from the container.
cinfo, err := cont.GetInfo(true)
if err != nil {
return nil, err
}
stats, err := self.memoryCache.RecentStats(cinfo.Name, query.Start, query.End, query.NumStats)
stats, err := m.memoryCache.RecentStats(cinfo.Name, query.Start, query.End, query.NumStats)
if err != nil {
return nil, err
}
@@ -501,55 +521,55 @@ func (self *manager) containerDataToContainerInfo(cont *containerData, query *in
ret := &info.ContainerInfo{
ContainerReference: cinfo.ContainerReference,
Subcontainers: cinfo.Subcontainers,
Spec: self.getAdjustedSpec(cinfo),
Spec: m.getAdjustedSpec(cinfo),
Stats: stats,
}
return ret, nil
}
func (self *manager) getContainer(containerName string) (*containerData, error) {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
cont, ok := self.containers[namespacedContainerName{Name: containerName}]
func (m *manager) getContainer(containerName string) (*containerData, error) {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
cont, ok := m.containers[namespacedContainerName{Name: containerName}]
if !ok {
return nil, fmt.Errorf("unknown container %q", containerName)
}
return cont, nil
}
func (self *manager) getSubcontainers(containerName string) map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containersMap := make(map[string]*containerData, len(self.containers))
func (m *manager) getSubcontainers(containerName string) map[string]*containerData {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
containersMap := make(map[string]*containerData, len(m.containers))
// Get all the unique subcontainers of the specified container
matchedName := path.Join(containerName, "/")
for i := range self.containers {
name := self.containers[i].info.Name
for i := range m.containers {
name := m.containers[i].info.Name
if name == containerName || strings.HasPrefix(name, matchedName) {
containersMap[self.containers[i].info.Name] = self.containers[i]
containersMap[m.containers[i].info.Name] = m.containers[i]
}
}
return containersMap
}
func (self *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
containersMap := self.getSubcontainers(containerName)
func (m *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
containersMap := m.getSubcontainers(containerName)
containers := make([]*containerData, 0, len(containersMap))
for _, cont := range containersMap {
containers = append(containers, cont)
}
return self.containerDataSliceToContainerInfoSlice(containers, query)
return m.containerDataSliceToContainerInfoSlice(containers, query)
}
func (self *manager) getAllDockerContainers() map[string]*containerData {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
containers := make(map[string]*containerData, len(self.containers))
func (m *manager) getAllDockerContainers() map[string]*containerData {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
containers := make(map[string]*containerData, len(m.containers))
// Get containers in the Docker namespace.
for name, cont := range self.containers {
for name, cont := range m.containers {
if name.Namespace == docker.DockerNamespace {
containers[cont.info.Name] = cont
}
@@ -557,12 +577,12 @@ func (self *manager) getAllDockerContainers() map[string]*containerData {
return containers
}
func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
containers := self.getAllDockerContainers()
func (m *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
containers := m.getAllDockerContainers()
output := make(map[string]info.ContainerInfo, len(containers))
for name, cont := range containers {
inf, err := self.containerDataToContainerInfo(cont, query)
inf, err := m.containerDataToContainerInfo(cont, query)
if err != nil {
// Ignore the error because of race condition and return best-effort result.
if err == memory.ErrDataNotFound {
@@ -576,19 +596,19 @@ func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[
return output, nil
}
func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
self.containersLock.RLock()
defer self.containersLock.RUnlock()
func (m *manager) getDockerContainer(containerName string) (*containerData, error) {
m.containersLock.RLock()
defer m.containersLock.RUnlock()
// Check for the container in the Docker container namespace.
cont, ok := self.containers[namespacedContainerName{
cont, ok := m.containers[namespacedContainerName{
Namespace: docker.DockerNamespace,
Name: containerName,
}]
// Look for container by short prefix name if no exact match found.
if !ok {
for contName, c := range self.containers {
for contName, c := range m.containers {
if contName.Namespace == docker.DockerNamespace && strings.HasPrefix(contName.Name, containerName) {
if cont == nil {
cont = c
@@ -606,20 +626,20 @@ func (self *manager) getDockerContainer(containerName string) (*containerData, e
return cont, nil
}
func (self *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
container, err := self.getDockerContainer(containerName)
func (m *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
container, err := m.getDockerContainer(containerName)
if err != nil {
return info.ContainerInfo{}, err
}
inf, err := self.containerDataToContainerInfo(container, query)
inf, err := m.containerDataToContainerInfo(container, query)
if err != nil {
return info.ContainerInfo{}, err
}
return *inf, nil
}
func (self *manager) containerDataSliceToContainerInfoSlice(containers []*containerData, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
func (m *manager) containerDataSliceToContainerInfoSlice(containers []*containerData, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
if len(containers) == 0 {
return nil, fmt.Errorf("no containers found")
}
@@ -627,7 +647,7 @@ func (self *manager) containerDataSliceToContainerInfoSlice(containers []*contai
// Get the info for each container.
output := make([]*info.ContainerInfo, 0, len(containers))
for i := range containers {
cinfo, err := self.containerDataToContainerInfo(containers[i], query)
cinfo, err := m.containerDataToContainerInfo(containers[i], query)
if err != nil {
// Skip containers with errors, we try to degrade gracefully.
continue
@@ -638,8 +658,8 @@ func (self *manager) containerDataSliceToContainerInfoSlice(containers []*contai
return output, nil
}
func (self *manager) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error) {
containers, err := self.getRequestedContainers(containerName, options)
func (m *manager) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error) {
containers, err := m.getRequestedContainers(containerName, options)
if err != nil {
return nil, err
}
@@ -649,7 +669,7 @@ func (self *manager) GetRequestedContainersInfo(containerName string, options v2
NumStats: options.Count,
}
for name, data := range containers {
info, err := self.containerDataToContainerInfo(data, &query)
info, err := m.containerDataToContainerInfo(data, &query)
if err != nil {
errs.append(name, "containerDataToContainerInfo", err)
}
@@ -658,26 +678,26 @@ func (self *manager) GetRequestedContainersInfo(containerName string, options v2
return containersMap, errs.OrNil()
}
func (self *manager) getRequestedContainers(containerName string, options v2.RequestOptions) (map[string]*containerData, error) {
func (m *manager) getRequestedContainers(containerName string, options v2.RequestOptions) (map[string]*containerData, error) {
containersMap := make(map[string]*containerData)
switch options.IdType {
case v2.TypeName:
if options.Recursive == false {
cont, err := self.getContainer(containerName)
if !options.Recursive {
cont, err := m.getContainer(containerName)
if err != nil {
return containersMap, err
}
containersMap[cont.info.Name] = cont
} else {
containersMap = self.getSubcontainers(containerName)
containersMap = m.getSubcontainers(containerName)
if len(containersMap) == 0 {
return containersMap, fmt.Errorf("unknown container: %q", containerName)
}
}
case v2.TypeDocker:
if options.Recursive == false {
if !options.Recursive {
containerName = strings.TrimPrefix(containerName, "/")
cont, err := self.getDockerContainer(containerName)
cont, err := m.getDockerContainer(containerName)
if err != nil {
return containersMap, err
}
@@ -686,7 +706,7 @@ func (self *manager) getRequestedContainers(containerName string, options v2.Req
if containerName != "/" {
return containersMap, fmt.Errorf("invalid request for docker container %q with subcontainers", containerName)
}
containersMap = self.getAllDockerContainers()
containersMap = m.getAllDockerContainers()
}
default:
return containersMap, fmt.Errorf("invalid request type %q", options.IdType)
@@ -706,32 +726,32 @@ func (self *manager) getRequestedContainers(containerName string, options v2.Req
return containersMap, nil
}
func (self *manager) GetDirFsInfo(dir string) (v2.FsInfo, error) {
device, err := self.fsInfo.GetDirFsDevice(dir)
func (m *manager) GetDirFsInfo(dir string) (v2.FsInfo, error) {
device, err := m.fsInfo.GetDirFsDevice(dir)
if err != nil {
return v2.FsInfo{}, fmt.Errorf("failed to get device for dir %q: %v", dir, err)
}
return self.getFsInfoByDeviceName(device.Device)
return m.getFsInfoByDeviceName(device.Device)
}
func (self *manager) GetFsInfoByFsUUID(uuid string) (v2.FsInfo, error) {
device, err := self.fsInfo.GetDeviceInfoByFsUUID(uuid)
func (m *manager) GetFsInfoByFsUUID(uuid string) (v2.FsInfo, error) {
device, err := m.fsInfo.GetDeviceInfoByFsUUID(uuid)
if err != nil {
return v2.FsInfo{}, err
}
return self.getFsInfoByDeviceName(device.Device)
return m.getFsInfoByDeviceName(device.Device)
}
func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
func (m *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
var empty time.Time
// Get latest data from filesystems hanging off root container.
stats, err := self.memoryCache.RecentStats("/", empty, empty, 1)
stats, err := m.memoryCache.RecentStats("/", empty, empty, 1)
if err != nil {
return nil, err
}
dev := ""
if len(label) != 0 {
dev, err = self.fsInfo.GetDeviceForLabel(label)
dev, err = m.fsInfo.GetDeviceForLabel(label)
if err != nil {
return nil, err
}
@@ -742,11 +762,11 @@ func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
if len(label) != 0 && fs.Device != dev {
continue
}
mountpoint, err := self.fsInfo.GetMountpointForDevice(fs.Device)
mountpoint, err := m.fsInfo.GetMountpointForDevice(fs.Device)
if err != nil {
return nil, err
}
labels, err := self.fsInfo.GetLabelsForDevice(fs.Device)
labels, err := m.fsInfo.GetLabelsForDevice(fs.Device)
if err != nil {
return nil, err
}
@@ -772,8 +792,7 @@ func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
m.machineMu.RLock()
defer m.machineMu.RUnlock()
// Copy and return the MachineInfo.
return &m.machineInfo, nil
return m.machineInfo.Clone(), nil
}
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
@@ -785,18 +804,15 @@ func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
}
func (m *manager) Exists(containerName string) bool {
m.containersLock.Lock()
defer m.containersLock.Unlock()
m.containersLock.RLock()
defer m.containersLock.RUnlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
_, ok := m.containers[namespacedName]
if ok {
return true
}
return false
return ok
}
func (m *manager) GetProcessList(containerName string, options v2.RequestOptions) ([]v2.ProcessInfo, error) {
@@ -831,7 +847,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
klog.V(4).Infof("Got config from %q: %q", v, configFile)
if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHTTPClient)
if err != nil {
return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
}
@@ -840,7 +856,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
}
} else {
newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHTTPClient)
if err != nil {
return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
}
@@ -853,35 +869,6 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
return nil
}
// Enables overwriting an existing containerData/Handler object for a given containerName.
// Can't use createContainer as it just returns if a given containerName has a handler already.
// Ex: rkt handler will want to take priority over the raw handler, but the raw handler might be created first.
// Only allow raw handler to be overridden
func (m *manager) overrideContainer(containerName string, watchSource watcher.ContainerWatchSource) error {
m.containersLock.Lock()
defer m.containersLock.Unlock()
namespacedName := namespacedContainerName{
Name: containerName,
}
if _, ok := m.containers[namespacedName]; ok {
containerData := m.containers[namespacedName]
if containerData.handler.Type() != container.ContainerTypeRaw {
return nil
}
err := m.destroyContainerLocked(containerName)
if err != nil {
return fmt.Errorf("overrideContainer: failed to destroy containerData/handler for %v: %v", containerName, err)
}
}
return m.createContainerLocked(containerName, watchSource)
}
// Create a container.
func (m *manager) createContainer(containerName string, watchSource watcher.ContainerWatchSource) error {
m.containersLock.Lock()
@@ -926,7 +913,17 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
} else {
cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
if err != nil {
klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %s: %s", cont.info.Name, err)
}
}
perfCgroupPath, err := handler.GetCgroupPath("perf_event")
if err != nil {
klog.Warningf("Error getting perf_event cgroup path: %q", err)
} else {
cont.perfCollector, err = m.perfManager.GetCollector(perfCgroupPath)
if err != nil {
klog.Infof("perf_event metrics will not be available for container %s: %s", cont.info.Name, err)
}
}
}
@@ -1100,16 +1097,16 @@ func (m *manager) detectSubcontainers(containerName string) error {
}
// Watches for new containers started in the system. Runs forever unless there is a setup error.
func (self *manager) watchForNewContainers(quit chan error) error {
for _, watcher := range self.containerWatchers {
err := watcher.Start(self.eventsChannel)
func (m *manager) watchForNewContainers(quit chan error) error {
for _, watcher := range m.containerWatchers {
err := watcher.Start(m.eventsChannel)
if err != nil {
return err
}
}
// There is a race between starting the watch and new container creation so we do a detection before we read new containers.
err := self.detectSubcontainers("/")
err := m.detectSubcontainers("/")
if err != nil {
return err
}
@@ -1118,15 +1115,15 @@ func (self *manager) watchForNewContainers(quit chan error) error {
go func() {
for {
select {
case event := <-self.eventsChannel:
case event := <-m.eventsChannel:
switch {
case event.EventType == watcher.ContainerAdd:
switch event.WatchSource {
default:
err = self.createContainer(event.Name, event.WatchSource)
err = m.createContainer(event.Name, event.WatchSource)
}
case event.EventType == watcher.ContainerDelete:
err = self.destroyContainer(event.Name)
err = m.destroyContainer(event.Name)
}
if err != nil {
klog.Warningf("Failed to process watch event %+v: %v", event, err)
@@ -1135,7 +1132,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
var errs partialFailure
// Stop processing events if asked to quit.
for i, watcher := range self.containerWatchers {
for i, watcher := range m.containerWatchers {
err := watcher.Stop()
if err != nil {
errs.append(fmt.Sprintf("watcher %d", i), "Stop", err)
@@ -1155,7 +1152,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
return nil
}
func (self *manager) watchForNewOoms() error {
func (m *manager) watchForNewOoms() error {
klog.V(2).Infof("Started watching for new ooms in manager")
outStream := make(chan *oomparser.OomInstance, 10)
oomLog, err := oomparser.New()
@@ -1172,7 +1169,7 @@ func (self *manager) watchForNewOoms() error {
Timestamp: oomInstance.TimeOfDeath,
EventType: info.EventOom,
}
err := self.eventHandler.AddEvent(newEvent)
err := m.eventHandler.AddEvent(newEvent)
if err != nil {
klog.Errorf("failed to add OOM event for %q: %v", oomInstance.ContainerName, err)
}
@@ -1189,7 +1186,7 @@ func (self *manager) watchForNewOoms() error {
},
},
}
err = self.eventHandler.AddEvent(newEvent)
err = m.eventHandler.AddEvent(newEvent)
if err != nil {
klog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
}
@@ -1199,18 +1196,18 @@ func (self *manager) watchForNewOoms() error {
}
// can be called by the api which will take events returned on the channel
func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
return self.eventHandler.WatchEvents(request)
func (m *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {
return m.eventHandler.WatchEvents(request)
}
// can be called by the api which will return all events satisfying the request
func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
return self.eventHandler.GetEvents(request)
func (m *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
return m.eventHandler.GetEvents(request)
}
// called by the api when a client is no longer listening to the channel
func (self *manager) CloseEventChannel(watch_id int) {
self.eventHandler.StopWatch(watch_id)
func (m *manager) CloseEventChannel(watchID int) {
m.eventHandler.StopWatch(watchID)
}
// Parses the events StoragePolicy from the flags.
@@ -1303,12 +1300,12 @@ func (m *manager) DebugInfo() map[string][]string {
return debugInfo
}
func (self *manager) getFsInfoByDeviceName(deviceName string) (v2.FsInfo, error) {
mountPoint, err := self.fsInfo.GetMountpointForDevice(deviceName)
func (m *manager) getFsInfoByDeviceName(deviceName string) (v2.FsInfo, error) {
mountPoint, err := m.fsInfo.GetMountpointForDevice(deviceName)
if err != nil {
return v2.FsInfo{}, fmt.Errorf("failed to get mount point for device %q: %v", deviceName, err)
}
infos, err := self.GetFsInfo("")
infos, err := m.GetFsInfo("")
if err != nil {
return v2.FsInfo{}, err
}
@@ -1322,22 +1319,22 @@ func (self *manager) getFsInfoByDeviceName(deviceName string) (v2.FsInfo, error)
func getVersionInfo() (*info.VersionInfo, error) {
kernel_version := machine.KernelVersion()
container_os := machine.ContainerOsVersion()
docker_version, err := docker.VersionString()
kernelVersion := machine.KernelVersion()
osVersion := machine.ContainerOsVersion()
dockerVersion, err := docker.VersionString()
if err != nil {
return nil, err
}
docker_api_version, err := docker.APIVersionString()
dockerAPIVersion, err := docker.APIVersionString()
if err != nil {
return nil, err
}
return &info.VersionInfo{
KernelVersion: kernel_version,
ContainerOsVersion: container_os,
DockerVersion: docker_version,
DockerAPIVersion: docker_api_version,
KernelVersion: kernelVersion,
ContainerOsVersion: osVersion,
DockerVersion: dockerVersion,
DockerAPIVersion: dockerAPIVersion,
CadvisorVersion: version.Info["version"],
CadvisorRevision: version.Info["revision"],
}, nil