vendor: cadvisor v0.39.0
Main upgrades:
- github.com/opencontainers/runc v1.0.0-rc93
- github.com/containerd/containerd v1.4.4
- github.com/docker/docker v20.10.2
- github.com/mrunalp/fileutils v0.5.0
- github.com/opencontainers/selinux v1.8.0
- github.com/cilium/ebpf v0.2.0
vendor/github.com/google/cadvisor/container/common/helpers.go (generated, vendored; 34 lines changed)

@@ -30,6 +30,7 @@ import (
 	"github.com/karrick/godirwalk"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
 
 	"k8s.io/klog/v2"
 )
@@ -68,6 +69,16 @@ func findFileInAncestorDir(current, file, limit string) (string, error) {
 	}
 }
 
+var bootTime = func() time.Time {
+	now := time.Now()
+	var sysinfo unix.Sysinfo_t
+	if err := unix.Sysinfo(&sysinfo); err != nil {
+		return now
+	}
+	sinceBoot := time.Duration(sysinfo.Uptime) * time.Second
+	return now.Add(-1 * sinceBoot).Truncate(time.Minute)
+}()
+
 func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoFactory, hasNetwork, hasFilesystem bool) (info.ContainerSpec, error) {
 	var spec info.ContainerSpec
 
@@ -75,17 +86,28 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 	// Get the lowest creation time from all hierarchies as the container creation time.
 	now := time.Now()
 	lowestTime := now
-	for _, cgroupPath := range cgroupPaths {
-		// The modified time of the cgroup directory changes whenever a subcontainer is created.
+	for _, cgroupPathDir := range cgroupPaths {
+		dir, err := os.Stat(cgroupPathDir)
+		if err == nil && dir.ModTime().Before(lowestTime) {
+			lowestTime = dir.ModTime()
+		}
+		// The modified time of the cgroup directory sometimes changes whenever a subcontainer is created.
 		// eg. /docker will have creation time matching the creation of latest docker container.
-		// Use clone_children as a workaround as it isn't usually modified. It is only likely changed
-		// immediately after creating a container.
-		cgroupPath = path.Join(cgroupPath, "cgroup.clone_children")
-		fi, err := os.Stat(cgroupPath)
+		// Use clone_children/events as a workaround as it isn't usually modified. It is only likely changed
+		// immediately after creating a container. If the directory modified time is lower, we use that.
+		cgroupPathFile := path.Join(cgroupPathDir, "cgroup.clone_children")
+		if cgroups.IsCgroup2UnifiedMode() {
+			cgroupPathFile = path.Join(cgroupPathDir, "cgroup.events")
+		}
+		fi, err := os.Stat(cgroupPathFile)
 		if err == nil && fi.ModTime().Before(lowestTime) {
 			lowestTime = fi.ModTime()
 		}
 	}
+	if lowestTime.Before(bootTime) {
+		lowestTime = bootTime
+	}
+
 	if lowestTime != now {
 		spec.CreationTime = lowestTime
 	}
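Note on the bootTime hunk above: the value is computed once, at package init, from the kernel uptime counter, and GetSpec then clamps any cgroup-derived creation time so it can never precede boot (cgroup file mtimes can otherwise produce nonsense timestamps). A minimal standalone sketch of the same calculation — Linux-only, with names of our own choosing rather than cAdvisor's:

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/sys/unix"
    )

    func main() {
        var si unix.Sysinfo_t
        if err := unix.Sysinfo(&si); err != nil {
            panic(err) // fails only where sysinfo(2) is unavailable
        }
        sinceBoot := time.Duration(si.Uptime) * time.Second
        // Truncate to whole minutes, as the vendored code does, to absorb uptime jitter.
        bootTime := time.Now().Add(-sinceBoot).Truncate(time.Minute)
        fmt.Println("booted at:", bootTime)
    }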
vendor/github.com/google/cadvisor/container/docker/factory.go (generated, vendored; 30 lines changed)

@@ -347,23 +347,25 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics
 	var (
 		thinPoolWatcher *devicemapper.ThinPoolWatcher
 		thinPoolName    string
+		zfsWatcher      *zfs.ZfsWatcher
 	)
-	if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver {
-		thinPoolWatcher, err = startThinPoolWatcher(dockerInfo)
-		if err != nil {
-			klog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
+	if includedMetrics.Has(container.DiskUsageMetrics) {
+		if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver {
+			thinPoolWatcher, err = startThinPoolWatcher(dockerInfo)
+			if err != nil {
+				klog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
+			}
+
+			// Safe to ignore error - driver status should always be populated.
+			status, _ := StatusFromDockerInfo(*dockerInfo)
+			thinPoolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
 		}
 
-		// Safe to ignore error - driver status should always be populated.
-		status, _ := StatusFromDockerInfo(*dockerInfo)
-		thinPoolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
-	}
-
-	var zfsWatcher *zfs.ZfsWatcher
-	if storageDriver(dockerInfo.Driver) == zfsStorageDriver {
-		zfsWatcher, err = startZfsWatcher(dockerInfo)
-		if err != nil {
-			klog.Errorf("zfs filesystem stats will not be reported: %v", err)
+		if storageDriver(dockerInfo.Driver) == zfsStorageDriver {
+			zfsWatcher, err = startZfsWatcher(dockerInfo)
+			if err != nil {
+				klog.Errorf("zfs filesystem stats will not be reported: %v", err)
+			}
 		}
 	}
 
vendor/github.com/google/cadvisor/container/docker/handler.go (generated, vendored; 37 lines changed)

@@ -398,11 +398,14 @@ func (h *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
 		fsType string
 	)
 
+	var fsInfo *info.FsInfo
+
 	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
 	for _, fs := range mi.Filesystems {
 		if fs.Device == device {
 			limit = fs.Capacity
 			fsType = fs.Type
+			fsInfo = &fs
 			break
 		}
 	}
@@ -413,11 +416,45 @@ func (h *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
 	fsStat.Usage = usage.TotalUsageBytes
 	fsStat.Inodes = usage.InodeUsage
 
+	if fsInfo != nil {
+		fileSystems, err := h.fsInfo.GetGlobalFsInfo()
+
+		if err == nil {
+			addDiskStats(fileSystems, fsInfo, &fsStat)
+		} else {
+			klog.Errorf("Unable to obtain diskstats for filesystem %s: %v", fsStat.Device, err)
+		}
+	}
+
 	stats.Filesystem = append(stats.Filesystem, fsStat)
 
 	return nil
 }
 
+func addDiskStats(fileSystems []fs.Fs, fsInfo *info.FsInfo, fsStats *info.FsStats) {
+	if fsInfo == nil {
+		return
+	}
+
+	for _, fileSys := range fileSystems {
+		if fsInfo.DeviceMajor == fileSys.DiskStats.Major &&
+			fsInfo.DeviceMinor == fileSys.DiskStats.Minor {
+			fsStats.ReadsCompleted = fileSys.DiskStats.ReadsCompleted
+			fsStats.ReadsMerged = fileSys.DiskStats.ReadsMerged
+			fsStats.SectorsRead = fileSys.DiskStats.SectorsRead
+			fsStats.ReadTime = fileSys.DiskStats.ReadTime
+			fsStats.WritesCompleted = fileSys.DiskStats.WritesCompleted
+			fsStats.WritesMerged = fileSys.DiskStats.WritesMerged
+			fsStats.SectorsWritten = fileSys.DiskStats.SectorsWritten
+			fsStats.WriteTime = fileSys.DiskStats.WriteTime
+			fsStats.IoInProgress = fileSys.DiskStats.IoInProgress
+			fsStats.IoTime = fileSys.DiskStats.IoTime
+			fsStats.WeightedIoTime = fileSys.DiskStats.WeightedIoTime
+			break
+		}
+	}
+}
+
 // TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.
 func (h *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
 	stats, err := h.libcontainerHandler.GetStats()
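Note on addDiskStats above: it attaches per-device I/O counters to the container's filesystem stats by matching device major/minor numbers. One subtlety in the hunk: `fsInfo = &fs` takes the address of the range variable, which is safe here only because the loop breaks immediately afterwards (before Go 1.22 the variable is reused across iterations). A hedged sketch of the same matching idea, with trimmed stand-in types of our own:

    package main

    import "fmt"

    // DiskStats is a trimmed stand-in for cAdvisor's fs.DiskStats (illustrative only).
    type DiskStats struct {
        Major, Minor   uint64
        ReadsCompleted uint64
    }

    // findByDevice returns the stats entry whose device numbers match, as
    // addDiskStats does for the container's backing filesystem.
    func findByDevice(all []DiskStats, major, minor uint64) (DiskStats, bool) {
        for _, d := range all {
            if d.Major == major && d.Minor == minor {
                return d, true
            }
        }
        return DiskStats{}, false
    }

    func main() {
        stats := []DiskStats{{Major: 8, Minor: 0, ReadsCompleted: 42}}
        if d, ok := findByDevice(stats, 8, 0); ok {
            fmt.Println("reads completed:", d.ReadsCompleted)
        }
    }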
vendor/github.com/google/cadvisor/container/factory.go (generated, vendored; 2 lines changed)

@@ -63,6 +63,7 @@ const (
 	ReferencedMemoryMetrics MetricKind = "referenced_memory"
 	CPUTopologyMetrics      MetricKind = "cpu_topology"
 	ResctrlMetrics          MetricKind = "resctrl"
+	CPUSetMetrics           MetricKind = "cpuset"
 )
 
 // AllMetrics represents all kinds of metrics that cAdvisor supported.
@@ -87,6 +88,7 @@ var AllMetrics = MetricSet{
 	ReferencedMemoryMetrics: struct{}{},
 	CPUTopologyMetrics:      struct{}{},
 	ResctrlMetrics:          struct{}{},
+	CPUSetMetrics:           struct{}{},
 }
 
 func (mk MetricKind) String() string {
vendor/github.com/google/cadvisor/container/libcontainer/handler.go (generated, vendored; 33 lines changed)

@@ -70,21 +70,22 @@ func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, includedM
 
 // Get cgroup and networking stats of the specified container
 func (h *Handler) GetStats() (*info.ContainerStats, error) {
-	var cgroupStats *cgroups.Stats
-	readCgroupStats := true
+	ignoreStatsError := false
 	if cgroups.IsCgroup2UnifiedMode() {
-		// On cgroup v2 there are no stats at the root cgroup
-		// so check whether it is the root cgroup
+		// On cgroup v2 the root cgroup stats have been introduced in recent kernel versions,
+		// so not all kernel versions have all the data. This means that stat fetching can fail
+		// due to lacking cgroup stat files, but that some data is provided.
 		if h.cgroupManager.Path("") == fs2.UnifiedMountpoint {
-			readCgroupStats = false
+			ignoreStatsError = true
 		}
 	}
-	var err error
-	if readCgroupStats {
-		cgroupStats, err = h.cgroupManager.GetStats()
-		if err != nil {
-			return nil, err
+
+	cgroupStats, err := h.cgroupManager.GetStats()
+	if err != nil {
+		if !ignoreStatsError {
+			return nil, err
 		}
+		klog.V(4).Infof("Ignoring errors when gathering stats for root cgroup since some controllers don't have stats on the root cgroup: %v", err)
 	}
 	libcontainerStats := &libcontainer.Stats{
 		CgroupStats: cgroupStats,
@@ -793,7 +794,12 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
 	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
 
-	if s.MemoryStats.UseHierarchy {
+	if cgroups.IsCgroup2UnifiedMode() {
+		ret.Memory.Cache = s.MemoryStats.Stats["file"]
+		ret.Memory.RSS = s.MemoryStats.Stats["anon"]
+		ret.Memory.Swap = s.MemoryStats.SwapUsage.Usage
+		ret.Memory.MappedFile = s.MemoryStats.Stats["file_mapped"]
+	} else if s.MemoryStats.UseHierarchy {
 		ret.Memory.Cache = s.MemoryStats.Stats["total_cache"]
 		ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
 		ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
@@ -829,6 +835,10 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.WorkingSet = workingSet
 }
 
+func setCPUSetStats(s *cgroups.Stats, ret *info.ContainerStats) {
+	ret.CpuSet.MemoryMigrate = s.CPUSetStats.MemoryMigrate
+}
+
 func getNumaStats(memoryStats map[uint8]uint64) map[uint8]uint64 {
 	stats := make(map[uint8]uint64, len(memoryStats))
 	for node, usage := range memoryStats {
@@ -906,6 +916,9 @@ func newContainerStats(libcontainerStats *libcontainer.Stats, includedMetrics co
 		if includedMetrics.Has(container.HugetlbUsageMetrics) {
 			setHugepageStats(s, ret)
 		}
+		if includedMetrics.Has(container.CPUSetMetrics) {
+			setCPUSetStats(s, ret)
+		}
 	}
 	if len(libcontainerStats.Interfaces) > 0 {
 		setNetworkStats(libcontainerStats, ret)
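Note on the setMemoryStats hunk: this is the cgroup v2 accounting fix. On a unified hierarchy the cache/RSS/mapped-file figures come from the "file", "anon", and "file_mapped" keys of memory.stat (plus the separately tracked swap usage), whereas cgroup v1 exposed "total_cache", "total_rss", and "total_swap". A hedged sketch of reading those keys directly, assuming a standard unified-hierarchy mount at /sys/fs/cgroup:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strconv"
        "strings"
    )

    // readMemoryStat parses /sys/fs/cgroup/<group>/memory.stat into a key->value map,
    // mirroring the keys cAdvisor now reads on cgroup v2 ("file", "anon", "file_mapped").
    func readMemoryStat(group string) (map[string]uint64, error) {
        f, err := os.Open("/sys/fs/cgroup/" + group + "/memory.stat")
        if err != nil {
            return nil, err
        }
        defer f.Close()

        stats := map[string]uint64{}
        sc := bufio.NewScanner(f)
        for sc.Scan() {
            fields := strings.Fields(sc.Text())
            if len(fields) != 2 {
                continue
            }
            v, err := strconv.ParseUint(fields[1], 10, 64)
            if err != nil {
                continue
            }
            stats[fields[0]] = v
        }
        return stats, sc.Err()
    }

    func main() {
        stats, err := readMemoryStat("system.slice")
        if err != nil {
            panic(err)
        }
        fmt.Println("cache:", stats["file"], "rss:", stats["anon"])
    }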
vendor/github.com/google/cadvisor/fs/fs.go (generated, vendored; 144 lines changed)

@@ -33,9 +33,9 @@ import (
 	"github.com/google/cadvisor/devicemapper"
 	"github.com/google/cadvisor/utils"
 	zfs "github.com/mistifyio/go-zfs"
+	mount "github.com/moby/sys/mountinfo"
 
 	"k8s.io/klog/v2"
-	"k8s.io/utils/mount"
 )
 
 const (
@@ -85,7 +85,7 @@ type RealFsInfo struct {
 	// Labels are intent-specific tags that are auto-detected.
 	labels map[string]string
 	// Map from mountpoint to mount information.
-	mounts map[string]mount.MountInfo
+	mounts map[string]mount.Info
 	// devicemapper client
 	dmsetup devicemapper.DmsetupClient
 	// fsUUIDToDeviceName is a map from the filesystem UUID to its device name.
@@ -93,7 +93,11 @@ type RealFsInfo struct {
 }
 
 func NewFsInfo(context Context) (FsInfo, error) {
-	mounts, err := mount.ParseMountInfo("/proc/self/mountinfo")
+	fileReader, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	mounts, err := mount.GetMountsFromReader(fileReader, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -110,13 +114,13 @@ func NewFsInfo(context Context) (FsInfo, error) {
 	fsInfo := &RealFsInfo{
 		partitions:         processMounts(mounts, excluded),
 		labels:             make(map[string]string),
-		mounts:             make(map[string]mount.MountInfo),
+		mounts:             make(map[string]mount.Info),
 		dmsetup:            devicemapper.NewDmsetupClient(),
 		fsUUIDToDeviceName: fsUUIDToDeviceName,
 	}
 
-	for _, mount := range mounts {
-		fsInfo.mounts[mount.MountPoint] = mount
+	for _, mnt := range mounts {
+		fsInfo.mounts[mnt.Mountpoint] = *mnt
 	}
 
 	// need to call this before the log line below printing out the partitions, as this function may
@@ -147,10 +151,10 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) {
 
 	fsUUIDToDeviceName := make(map[string]string)
 	for _, file := range files {
-		path := filepath.Join(dir, file.Name())
-		target, err := os.Readlink(path)
+		fpath := filepath.Join(dir, file.Name())
+		target, err := os.Readlink(fpath)
 		if err != nil {
-			klog.Warningf("Failed to resolve symlink for %q", path)
+			klog.Warningf("Failed to resolve symlink for %q", fpath)
 			continue
 		}
 		device, err := filepath.Abs(filepath.Join(dir, target))
@@ -162,11 +166,12 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) {
 	return fsUUIDToDeviceName, nil
 }
 
-func processMounts(mounts []mount.MountInfo, excludedMountpointPrefixes []string) map[string]partition {
+func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) map[string]partition {
 	partitions := make(map[string]partition)
 
 	supportedFsType := map[string]bool{
-		// all ext systems are checked through prefix.
+		// all ext and nfs systems are checked through prefix
+		// because there are a number of families (e.g., ext3, ext4, nfs3, nfs4...)
 		"btrfs":   true,
 		"overlay": true,
 		"tmpfs":   true,
@@ -174,20 +179,21 @@ func processMounts(mounts []mount.MountInfo, excludedMountpointPrefixes []string
 		"zfs":     true,
 	}
 
-	for _, mount := range mounts {
-		if !strings.HasPrefix(mount.FsType, "ext") && !supportedFsType[mount.FsType] {
+	for _, mnt := range mounts {
+		if !strings.HasPrefix(mnt.FSType, "ext") && !strings.HasPrefix(mnt.FSType, "nfs") &&
+			!supportedFsType[mnt.FSType] {
 			continue
 		}
 		// Avoid bind mounts, exclude tmpfs.
-		if _, ok := partitions[mount.Source]; ok {
-			if mount.FsType != "tmpfs" {
+		if _, ok := partitions[mnt.Source]; ok {
+			if mnt.FSType != "tmpfs" {
 				continue
 			}
 		}
 
 		hasPrefix := false
 		for _, prefix := range excludedMountpointPrefixes {
-			if strings.HasPrefix(mount.MountPoint, prefix) {
+			if strings.HasPrefix(mnt.Mountpoint, prefix) {
 				hasPrefix = true
 				break
 			}
@@ -197,31 +203,31 @@ func processMounts(mounts []mount.MountInfo, excludedMountpointPrefixes []string
 		}
 
 		// using mountpoint to replace device once fstype it tmpfs
-		if mount.FsType == "tmpfs" {
-			mount.Source = mount.MountPoint
+		if mnt.FSType == "tmpfs" {
+			mnt.Source = mnt.Mountpoint
 		}
 		// btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
 		// instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point
-		if mount.FsType == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
-			major, minor, err := getBtrfsMajorMinorIds(&mount)
+		if mnt.FSType == "btrfs" && mnt.Major == 0 && strings.HasPrefix(mnt.Source, "/dev/") {
+			major, minor, err := getBtrfsMajorMinorIds(mnt)
 			if err != nil {
 				klog.Warningf("%s", err)
 			} else {
-				mount.Major = major
-				mount.Minor = minor
+				mnt.Major = major
+				mnt.Minor = minor
 			}
 		}
 
 		// overlay fix: Making mount source unique for all overlay mounts, using the mount's major and minor ids.
-		if mount.FsType == "overlay" {
-			mount.Source = fmt.Sprintf("%s_%d-%d", mount.Source, mount.Major, mount.Minor)
+		if mnt.FSType == "overlay" {
+			mnt.Source = fmt.Sprintf("%s_%d-%d", mnt.Source, mnt.Major, mnt.Minor)
 		}
 
-		partitions[mount.Source] = partition{
-			fsType:     mount.FsType,
-			mountpoint: mount.MountPoint,
-			major:      uint(mount.Major),
-			minor:      uint(mount.Minor),
+		partitions[mnt.Source] = partition{
+			fsType:     mnt.FSType,
+			mountpoint: mnt.Mountpoint,
+			major:      uint(mnt.Major),
+			minor:      uint(mnt.Minor),
 		}
 	}
 
@@ -256,12 +262,12 @@ func (i *RealFsInfo) getDockerDeviceMapperInfo(context DockerContext) (string, *
 }
 
 // addSystemRootLabel attempts to determine which device contains the mount for /.
-func (i *RealFsInfo) addSystemRootLabel(mounts []mount.MountInfo) {
+func (i *RealFsInfo) addSystemRootLabel(mounts []*mount.Info) {
 	for _, m := range mounts {
-		if m.MountPoint == "/" {
+		if m.Mountpoint == "/" {
 			i.partitions[m.Source] = partition{
-				fsType:     m.FsType,
-				mountpoint: m.MountPoint,
+				fsType:     m.FSType,
+				mountpoint: m.Mountpoint,
 				major:      uint(m.Major),
 				minor:      uint(m.Minor),
 			}
@@ -272,7 +278,7 @@ func (i *RealFsInfo) addSystemRootLabel(mounts []mount.MountInfo) {
 }
 
 // addDockerImagesLabel attempts to determine which device contains the mount for docker images.
-func (i *RealFsInfo) addDockerImagesLabel(context Context, mounts []mount.MountInfo) {
+func (i *RealFsInfo) addDockerImagesLabel(context Context, mounts []*mount.Info) {
 	dockerDev, dockerPartition, err := i.getDockerDeviceMapperInfo(context.Docker)
 	if err != nil {
 		klog.Warningf("Could not get Docker devicemapper device: %v", err)
@@ -285,7 +291,7 @@ func (i *RealFsInfo) addDockerImagesLabel(context Context, mounts []mount.MountI
 	}
 }
 
-func (i *RealFsInfo) addCrioImagesLabel(context Context, mounts []mount.MountInfo) {
+func (i *RealFsInfo) addCrioImagesLabel(context Context, mounts []*mount.Info) {
 	if context.Crio.Root != "" {
 		crioPath := context.Crio.Root
 		crioImagePaths := map[string]struct{}{
@@ -324,20 +330,19 @@ func getDockerImagePaths(context Context) map[string]struct{} {
 
 // This method compares the mountpoints with possible container image mount points. If a match is found,
 // the label is added to the partition.
-func (i *RealFsInfo) updateContainerImagesPath(label string, mounts []mount.MountInfo, containerImagePaths map[string]struct{}) {
-	var useMount *mount.MountInfo
+func (i *RealFsInfo) updateContainerImagesPath(label string, mounts []*mount.Info, containerImagePaths map[string]struct{}) {
+	var useMount *mount.Info
 	for _, m := range mounts {
-		if _, ok := containerImagePaths[m.MountPoint]; ok {
-			if useMount == nil || (len(useMount.MountPoint) < len(m.MountPoint)) {
-				useMount = new(mount.MountInfo)
-				*useMount = m
+		if _, ok := containerImagePaths[m.Mountpoint]; ok {
+			if useMount == nil || (len(useMount.Mountpoint) < len(m.Mountpoint)) {
+				useMount = m
 			}
 		}
 	}
 	if useMount != nil {
 		i.partitions[useMount.Source] = partition{
-			fsType:     useMount.FsType,
-			mountpoint: useMount.MountPoint,
+			fsType:     useMount.FSType,
+			mountpoint: useMount.Mountpoint,
 			major:      uint(useMount.Major),
 			minor:      uint(useMount.Minor),
 		}
@@ -354,7 +359,7 @@ func (i *RealFsInfo) GetDeviceForLabel(label string) (string, error) {
 }
 
 func (i *RealFsInfo) GetLabelsForDevice(device string) ([]string, error) {
-	labels := []string{}
+	var labels []string
 	for label, dev := range i.labels {
 		if dev == device {
 			labels = append(labels, label)
@@ -462,12 +467,12 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 		// 8      50 sdd2 40 0 280 223 7 0 22 108 0 330 330
 		deviceName := path.Join("/dev", words[2])
 
-		var error error
+		var err error
 		devInfo := make([]uint64, 2)
 		for i := 0; i < len(devInfo); i++ {
-			devInfo[i], error = strconv.ParseUint(words[i], 10, 64)
-			if error != nil {
-				return nil, error
+			devInfo[i], err = strconv.ParseUint(words[i], 10, 64)
+			if err != nil {
+				return nil, err
 			}
 		}
 
@@ -478,11 +483,22 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 			return nil, fmt.Errorf("could not parse all 11 columns of /proc/diskstats")
 		}
 		for i := offset; i < wordLength; i++ {
-			stats[i-offset], error = strconv.ParseUint(words[i], 10, 64)
-			if error != nil {
-				return nil, error
+			stats[i-offset], err = strconv.ParseUint(words[i], 10, 64)
+			if err != nil {
+				return nil, err
 			}
 		}
 
+		major64, err := strconv.ParseUint(words[0], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		minor64, err := strconv.ParseUint(words[1], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
 		diskStats := DiskStats{
 			MajorNum: devInfo[0],
 			MinorNum: devInfo[1],
@@ -497,6 +513,8 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
 			IoInProgress:   stats[8],
 			IoTime:         stats[9],
 			WeightedIoTime: stats[10],
+			Major:          major64,
+			Minor:          minor64,
 		}
 		diskStatsMap[deviceName] = diskStats
 	}
@@ -527,8 +545,8 @@ func (i *RealFsInfo) GetDeviceInfoByFsUUID(uuid string) (*DeviceInfo, error) {
 	return &DeviceInfo{deviceName, p.major, p.minor}, nil
 }
 
-func (i *RealFsInfo) mountInfoFromDir(dir string) (*mount.MountInfo, bool) {
-	mount, found := i.mounts[dir]
+func (i *RealFsInfo) mountInfoFromDir(dir string) (*mount.Info, bool) {
+	mnt, found := i.mounts[dir]
 	// try the parent dir if not found until we reach the root dir
 	// this is an issue on btrfs systems where the directory is not
 	// the subvolume
@@ -536,15 +554,15 @@ func (i *RealFsInfo) mountInfoFromDir(dir string) (*mount.MountInfo, bool) {
 		pathdir, _ := filepath.Split(dir)
 		// break when we reach root
 		if pathdir == "/" {
-			mount, found = i.mounts["/"]
+			mnt, found = i.mounts["/"]
 			break
 		}
 		// trim "/" from the new parent path otherwise the next possible
 		// filepath.Split in the loop will not split the string any further
 		dir = strings.TrimSuffix(pathdir, "/")
-		mount, found = i.mounts[dir]
+		mnt, found = i.mounts[dir]
 	}
-	return &mount, found
+	return &mnt, found
 }
 
 func (i *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
@@ -563,13 +581,13 @@ func (i *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
 		}
 	}
 
-	mount, found := i.mountInfoFromDir(dir)
-	if found && mount.FsType == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
-		major, minor, err := getBtrfsMajorMinorIds(mount)
+	mnt, found := i.mountInfoFromDir(dir)
+	if found && mnt.FSType == "btrfs" && mnt.Major == 0 && strings.HasPrefix(mnt.Source, "/dev/") {
+		major, minor, err := getBtrfsMajorMinorIds(mnt)
 		if err != nil {
 			klog.Warningf("%s", err)
 		} else {
-			return &DeviceInfo{mount.Source, uint(major), uint(minor)}, nil
+			return &DeviceInfo{mnt.Source, uint(major), uint(minor)}, nil
 		}
 	}
 	return nil, fmt.Errorf("could not find device with major: %d, minor: %d in cached partitions map", major, minor)
@@ -755,7 +773,7 @@ func getZfstats(poolName string) (uint64, uint64, uint64, error) {
 }
 
 // Get major and minor Ids for a mount point using btrfs as filesystem.
-func getBtrfsMajorMinorIds(mount *mount.MountInfo) (int, int, error) {
+func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
 	// btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
 	// instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point
 
@@ -768,9 +786,9 @@ func getBtrfsMajorMinorIds(mount *mount.MountInfo) (int, int, error) {
 
 	klog.V(4).Infof("btrfs mount %#v", mount)
 	if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK {
-		err := syscall.Stat(mount.MountPoint, buf)
+		err := syscall.Stat(mount.Mountpoint, buf)
 		if err != nil {
-			err = fmt.Errorf("stat failed on %s with error: %s", mount.MountPoint, err)
+			err = fmt.Errorf("stat failed on %s with error: %s", mount.Mountpoint, err)
 			return 0, 0, err
 		}
 
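Note on fs.go: the dominant change is the switch from k8s.io/utils/mount to github.com/moby/sys/mountinfo, which renames fields (FsType becomes FSType, MountPoint becomes Mountpoint) and returns []*mount.Info rather than values. A hedged usage sketch of the new parser; GetMountsFromReader also accepts a filter, which the vendored code passes as nil:

    package main

    import (
        "fmt"
        "os"

        mount "github.com/moby/sys/mountinfo"
    )

    func main() {
        f, err := os.Open("/proc/self/mountinfo")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // nil filter keeps every entry, matching what the vendored NewFsInfo does.
        mounts, err := mount.GetMountsFromReader(f, nil)
        if err != nil {
            panic(err)
        }
        for _, m := range mounts {
            fmt.Printf("%s on %s (%s, %d:%d)\n", m.Source, m.Mountpoint, m.FSType, m.Major, m.Minor)
        }
    }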
vendor/github.com/google/cadvisor/fs/types.go (generated, vendored; 2 lines changed)

@@ -77,6 +77,8 @@ type DiskStats struct {
 	IoInProgress   uint64
 	IoTime         uint64
 	WeightedIoTime uint64
+	Major          uint64
+	Minor          uint64
 }
 
 type UsageInfo struct {
vendor/github.com/google/cadvisor/info/v1/container.go (generated, vendored; 6 lines changed)

@@ -399,6 +399,10 @@ type MemoryStats struct {
 	HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"`
 }
 
+type CPUSetStats struct {
+	MemoryMigrate uint64 `json:"memory_migrate"`
+}
+
 type MemoryNumaStats struct {
 	File map[uint8]uint64 `json:"file,omitempty"`
 	Anon map[uint8]uint64 `json:"anon,omitempty"`
@@ -957,6 +961,8 @@ type ContainerStats struct {
 
 	// Resource Control (resctrl) statistics
 	Resctrl ResctrlStats `json:"resctrl,omitempty"`
+
+	CpuSet CPUSetStats `json:"cpuset,omitempty"`
 }
 
 func timeEq(t1, t2 time.Time, tolerance time.Duration) bool {
vendor/github.com/google/cadvisor/machine/machine.go (generated, vendored; 47 lines changed)

@@ -16,18 +16,15 @@
 package machine
 
 import (
-	"bytes"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path"
-	"path/filepath"
 	"regexp"
+	"strconv"
+	"strings"
 
 	// s390/s390x changes
 	"runtime"
-	"strconv"
-	"strings"
 
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/utils"
@@ -54,9 +51,6 @@ var (
 	maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
 )
 
-const sysFsCPUCoreID = "core_id"
-const sysFsCPUPhysicalPackageID = "physical_package_id"
-const sysFsCPUTopology = "topology"
 const memTypeFileName = "dimm_mem_type"
 const sizeFileName = "size"
 
@@ -66,7 +60,7 @@ func GetPhysicalCores(procInfo []byte) int {
 	if numCores == 0 {
 		// read number of cores from /sys/bus/cpu/devices/cpu*/topology/core_id to deal with processors
 		// for which 'core id' is not available in /proc/cpuinfo
-		numCores = getUniqueCPUPropertyCount(cpuBusPath, sysFsCPUCoreID)
+		numCores = sysfs.GetUniqueCPUPropertyCount(cpuBusPath, sysfs.CPUCoreID)
 	}
 	if numCores == 0 {
 		klog.Errorf("Cannot read number of physical cores correctly, number of cores set to %d", numCores)
@@ -80,7 +74,7 @@ func GetSockets(procInfo []byte) int {
 	if numSocket == 0 {
 		// read number of sockets from /sys/bus/cpu/devices/cpu*/topology/physical_package_id to deal with processors
 		// for which 'physical id' is not available in /proc/cpuinfo
-		numSocket = getUniqueCPUPropertyCount(cpuBusPath, sysFsCPUPhysicalPackageID)
+		numSocket = sysfs.GetUniqueCPUPropertyCount(cpuBusPath, sysfs.CPUPhysicalPackageID)
 	}
 	if numSocket == 0 {
 		klog.Errorf("Cannot read number of sockets correctly, number of sockets set to %d", numSocket)
@@ -236,39 +230,6 @@ func parseCapacity(b []byte, r *regexp.Regexp) (uint64, error) {
 	return m * 1024, err
 }
 
-// Looks for sysfs cpu path containing given CPU property, e.g. core_id or physical_package_id
-// and returns number of unique values of given property, exemplary usage: getting number of CPU physical cores
-func getUniqueCPUPropertyCount(cpuBusPath string, propertyName string) int {
-	pathPattern := cpuBusPath + "cpu*[0-9]"
-	sysCPUPaths, err := filepath.Glob(pathPattern)
-	if err != nil {
-		klog.Errorf("Cannot find files matching pattern (pathPattern: %s), number of unique %s set to 0", pathPattern, propertyName)
-		return 0
-	}
-	uniques := make(map[string]bool)
-	for _, sysCPUPath := range sysCPUPaths {
-		onlinePath := filepath.Join(sysCPUPath, "online")
-		onlineVal, err := ioutil.ReadFile(onlinePath)
-		if err != nil {
-			klog.Warningf("Cannot determine CPU %s online state, skipping", sysCPUPath)
-			continue
-		}
-		onlineVal = bytes.TrimSpace(onlineVal)
-		if len(onlineVal) == 0 || onlineVal[0] != 49 {
-			klog.Warningf("CPU %s is offline, skipping", sysCPUPath)
-			continue
-		}
-		propertyPath := filepath.Join(sysCPUPath, sysFsCPUTopology, propertyName)
-		propertyVal, err := ioutil.ReadFile(propertyPath)
-		if err != nil {
-			klog.Errorf("Cannot open %s, number of unique %s set to 0", propertyPath, propertyName)
-			return 0
-		}
-		uniques[string(propertyVal)] = true
-	}
-	return len(uniques)
-}
-
 // getUniqueMatchesCount returns number of unique matches in given argument using provided regular expression
 func getUniqueMatchesCount(s string, r *regexp.Regexp) int {
 	matches := r.FindAllString(s, -1)
vendor/github.com/google/cadvisor/manager/container.go (generated, vendored; 227 lines changed)

@@ -38,7 +38,7 @@ import (
 	"github.com/google/cadvisor/summary"
 	"github.com/google/cadvisor/utils/cpuload"
 
-	units "github.com/docker/go-units"
+	"github.com/docker/go-units"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/clock"
 )
@@ -47,9 +47,14 @@
 var enableLoadReader = flag.Bool("enable_load_reader", false, "Whether to enable cpu load reader")
 var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings")
 
+// TODO: replace regular expressions with something simpler, such as strings.Split().
 // cgroup type chosen to fetch the cgroup path of a process.
-// Memory has been chosen, as it is one of the default cgroups that is enabled for most containers.
-var cgroupPathRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`)
+// Memory has been chosen, as it is one of the default cgroups that is enabled for most containers...
+var cgroupMemoryPathRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`)
+
+// ... but there are systems (e.g. Raspberry Pi 4) where memory cgroup controller is disabled by default.
+// We should check cpu cgroup then.
+var cgroupCPUPathRegExp = regexp.MustCompile(`cpu[^:]*:(.*?)[,;$]`)
 
 type containerInfo struct {
 	info.ContainerReference
@@ -138,7 +143,10 @@ func (cd *containerData) allowErrorLogging() bool {
 // periodic housekeeping to reset. This should be used sparingly, as calling OnDemandHousekeeping frequently
 // can have serious performance costs.
 func (cd *containerData) OnDemandHousekeeping(maxAge time.Duration) {
-	if cd.clock.Since(cd.statsLastUpdatedTime) > maxAge {
+	cd.lock.Lock()
+	timeSinceStatsLastUpdate := cd.clock.Since(cd.statsLastUpdatedTime)
+	cd.lock.Unlock()
+	if timeSinceStatsLastUpdate > maxAge {
 		housekeepingFinishedChan := make(chan struct{})
 		cd.onDemandChan <- housekeepingFinishedChan
 		select {
@@ -195,20 +203,28 @@ func (cd *containerData) DerivedStats() (v2.DerivedStats, error) {
 	return cd.summaryReader.DerivedStats()
 }
 
-func (cd *containerData) getCgroupPath(cgroups string) (string, error) {
+func (cd *containerData) getCgroupPath(cgroups string) string {
 	if cgroups == "-" {
-		return "/", nil
+		return "/"
 	}
 	if strings.HasPrefix(cgroups, "0::") {
-		return cgroups[3:], nil
+		return cgroups[3:]
 	}
-	matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups))
+	matches := cgroupMemoryPathRegExp.FindSubmatch([]byte(cgroups))
 	if len(matches) != 2 {
-		klog.V(3).Infof("failed to get memory cgroup path from %q", cgroups)
-		// return root in case of failures - memory hierarchy might not be enabled.
-		return "/", nil
+		klog.V(3).Infof(
+			"failed to get memory cgroup path from %q, will try to get cpu cgroup path",
+			cgroups,
+		)
+		// On some systems (e.g. Raspberry PI 4) cgroup memory controlled is disabled by default.
+		matches = cgroupCPUPathRegExp.FindSubmatch([]byte(cgroups))
+		if len(matches) != 2 {
+			klog.V(3).Infof("failed to get cpu cgroup path from %q; assuming root cgroup", cgroups)
+			// return root in case of failures - memory hierarchy might not be enabled.
+			return "/"
+		}
 	}
-	return string(matches[1]), nil
+	return string(matches[1])
 }
 
 // Returns contents of a file inside the container root.
@@ -271,10 +287,7 @@ func (cd *containerData) getContainerPids(inHostNamespace bool) ([]string, error
 			return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
 		}
 		pid := fields[0]
-		cgroup, err := cd.getCgroupPath(fields[1])
-		if err != nil {
-			return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[1], err)
-		}
+		cgroup := cd.getCgroupPath(fields[1])
 		if cd.info.Name == cgroup {
 			pids = append(pids, pid)
 		}
@@ -283,106 +296,130 @@
 }
 
 func (cd *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) {
-	// report all processes for root.
-	isRoot := cd.info.Name == "/"
-	rootfs := "/"
-	if !inHostNamespace {
-		rootfs = "/rootfs"
-	}
 	format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,psr,cgroup"
 	out, err := cd.getPsOutput(inHostNamespace, format)
 	if err != nil {
 		return nil, err
 	}
-	expectedFields := 13
+	return cd.parseProcessList(cadvisorContainer, inHostNamespace, out)
+}
+
+func (cd *containerData) parseProcessList(cadvisorContainer string, inHostNamespace bool, out []byte) ([]v2.ProcessInfo, error) {
+	rootfs := "/"
+	if !inHostNamespace {
+		rootfs = "/rootfs"
+	}
 	processes := []v2.ProcessInfo{}
 	lines := strings.Split(string(out), "\n")
 	for _, line := range lines[1:] {
-		if len(line) == 0 {
+		processInfo, err := cd.parsePsLine(line, cadvisorContainer, inHostNamespace)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse line %s: %v", line, err)
+		}
+		if processInfo == nil {
 			continue
 		}
-		fields := strings.Fields(line)
-		if len(fields) < expectedFields {
-			return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
-		}
-		pid, err := strconv.Atoi(fields[1])
-		if err != nil {
-			return nil, fmt.Errorf("invalid pid %q: %v", fields[1], err)
-		}
-		ppid, err := strconv.Atoi(fields[2])
-		if err != nil {
-			return nil, fmt.Errorf("invalid ppid %q: %v", fields[2], err)
-		}
-		percentCPU, err := strconv.ParseFloat(fields[4], 32)
-		if err != nil {
-			return nil, fmt.Errorf("invalid cpu percent %q: %v", fields[4], err)
-		}
-		percentMem, err := strconv.ParseFloat(fields[5], 32)
-		if err != nil {
-			return nil, fmt.Errorf("invalid memory percent %q: %v", fields[5], err)
-		}
-		rss, err := strconv.ParseUint(fields[6], 0, 64)
-		if err != nil {
-			return nil, fmt.Errorf("invalid rss %q: %v", fields[6], err)
-		}
-		// convert to bytes
-		rss *= 1024
-		vs, err := strconv.ParseUint(fields[7], 0, 64)
-		if err != nil {
-			return nil, fmt.Errorf("invalid virtual size %q: %v", fields[7], err)
-		}
-		// convert to bytes
-		vs *= 1024
-		psr, err := strconv.Atoi(fields[11])
-		if err != nil {
-			return nil, fmt.Errorf("invalid pid %q: %v", fields[1], err)
-		}
-
-		cgroup, err := cd.getCgroupPath(fields[12])
-		if err != nil {
-			return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[11], err)
-		}
-		// Remove the ps command we just ran from cadvisor container.
-		// Not necessary, but makes the cadvisor page look cleaner.
-		if !inHostNamespace && cadvisorContainer == cgroup && fields[10] == "ps" {
-			continue
-		}
-		var cgroupPath string
-		if isRoot {
-			cgroupPath = cgroup
-		}
 
 		var fdCount int
-		dirPath := path.Join(rootfs, "/proc", strconv.Itoa(pid), "fd")
+		dirPath := path.Join(rootfs, "/proc", strconv.Itoa(processInfo.Pid), "fd")
 		fds, err := ioutil.ReadDir(dirPath)
 		if err != nil {
 			klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
 			continue
 		}
 		fdCount = len(fds)
+		processInfo.FdCount = fdCount
 
-		if isRoot || cd.info.Name == cgroup {
-			processes = append(processes, v2.ProcessInfo{
-				User:          fields[0],
-				Pid:           pid,
-				Ppid:          ppid,
-				StartTime:     fields[3],
-				PercentCpu:    float32(percentCPU),
-				PercentMemory: float32(percentMem),
-				RSS:           rss,
-				VirtualSize:   vs,
-				Status:        fields[8],
-				RunningTime:   fields[9],
-				Cmd:           fields[10],
-				CgroupPath:    cgroupPath,
-				FdCount:       fdCount,
-				Psr:           psr,
-			})
-		}
+		processes = append(processes, *processInfo)
 	}
 	return processes, nil
 }
 
+func (cd *containerData) isRoot() bool {
+	return cd.info.Name == "/"
+}
+
+func (cd *containerData) parsePsLine(line, cadvisorContainer string, inHostNamespace bool) (*v2.ProcessInfo, error) {
+	const expectedFields = 13
+	if len(line) == 0 {
+		return nil, nil
+	}
+
+	info := v2.ProcessInfo{}
+	var err error
+
+	fields := strings.Fields(line)
+	if len(fields) < expectedFields {
+		return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
+	}
+	info.User = fields[0]
+	info.StartTime = fields[3]
+	info.Status = fields[8]
+	info.RunningTime = fields[9]
+
+	info.Pid, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("invalid pid %q: %v", fields[1], err)
+	}
+	info.Ppid, err = strconv.Atoi(fields[2])
+	if err != nil {
+		return nil, fmt.Errorf("invalid ppid %q: %v", fields[2], err)
+	}
+
+	percentCPU, err := strconv.ParseFloat(fields[4], 32)
+	if err != nil {
+		return nil, fmt.Errorf("invalid cpu percent %q: %v", fields[4], err)
+	}
+	info.PercentCpu = float32(percentCPU)
+	percentMem, err := strconv.ParseFloat(fields[5], 32)
+	if err != nil {
+		return nil, fmt.Errorf("invalid memory percent %q: %v", fields[5], err)
+	}
+	info.PercentMemory = float32(percentMem)
+
+	info.RSS, err = strconv.ParseUint(fields[6], 0, 64)
+	if err != nil {
+		return nil, fmt.Errorf("invalid rss %q: %v", fields[6], err)
+	}
+	info.VirtualSize, err = strconv.ParseUint(fields[7], 0, 64)
+	if err != nil {
+		return nil, fmt.Errorf("invalid virtual size %q: %v", fields[7], err)
+	}
+	// convert to bytes
+	info.RSS *= 1024
+	info.VirtualSize *= 1024
+
+	// According to `man ps`: The following user-defined format specifiers may contain spaces: args, cmd, comm, command,
+	// fname, ucmd, ucomm, lstart, bsdstart, start.
+	// Therefore we need to be able to parse comm that consists of multiple space-separated parts.
+	info.Cmd = strings.Join(fields[10:len(fields)-2], " ")
+
+	// These are last two parts of the line. We create a subslice of `fields` to handle comm that includes spaces.
+	lastTwoFields := fields[len(fields)-2:]
+	info.Psr, err = strconv.Atoi(lastTwoFields[0])
+	if err != nil {
+		return nil, fmt.Errorf("invalid psr %q: %v", lastTwoFields[0], err)
+	}
+	info.CgroupPath = cd.getCgroupPath(lastTwoFields[1])
+
+	// Remove the ps command we just ran from cadvisor container.
+	// Not necessary, but makes the cadvisor page look cleaner.
+	if !inHostNamespace && cadvisorContainer == info.CgroupPath && info.Cmd == "ps" {
+		return nil, nil
+	}
+
+	// Do not report processes from other containers when non-root container requested.
+	if !cd.isRoot() && info.CgroupPath != cd.info.Name {
+		return nil, nil
+	}

+	// Remove cgroup information when non-root container requested.
+	if !cd.isRoot() {
+		info.CgroupPath = ""
+	}
+	return &info, nil
+}
+
 func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, clock clock.Clock) (*containerData, error) {
 	if memoryCache == nil {
 		return nil, fmt.Errorf("nil memory storage")
@@ -516,7 +553,7 @@ func (cd *containerData) housekeeping() {
 		usageCPUNs := uint64(0)
 		for i := range stats {
 			if i > 0 {
-				usageCPUNs += (stats[i].Cpu.Usage.Total - stats[i-1].Cpu.Usage.Total)
+				usageCPUNs += stats[i].Cpu.Usage.Total - stats[i-1].Cpu.Usage.Total
 			}
 		}
 		usageMemory := stats[numSamples-1].Memory.Usage
@@ -555,6 +592,8 @@ func (cd *containerData) housekeepingTick(timer <-chan time.Time, longHousekeepi
 		klog.V(3).Infof("[%s] Housekeeping took %s", cd.info.Name, duration)
 	}
 	cd.notifyOnDemand()
+	cd.lock.Lock()
+	defer cd.lock.Unlock()
 	cd.statsLastUpdatedTime = cd.clock.Now()
 	return true
 }
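Note on the GetProcessList refactor: line handling moves into parsePsLine mainly so that a comm containing spaces parses correctly — everything between field 10 and the trailing psr/cgroup pair is re-joined into Cmd. A hedged sketch of that slicing on a made-up ps row:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Sample row in the "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,psr,cgroup"
        // format; the command ("kube proxy helper") contains spaces.
        line := "root 42 1 10:00 0.5 1.2 1024 2048 S 00:01 kube proxy helper 3 memory:/kubepods"

        fields := strings.Fields(line)
        cmd := strings.Join(fields[10:len(fields)-2], " ") // everything between time and psr/cgroup
        lastTwo := fields[len(fields)-2:]                  // psr and cgroup never contain spaces

        fmt.Printf("cmd=%q psr=%s cgroup=%s\n", cmd, lastTwo[0], lastTwo[1])
    }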
vendor/github.com/google/cadvisor/metrics/prometheus.go (generated, vendored; 32 lines changed)

@@ -455,6 +455,16 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			},
 		}...)
 	}
+	if includedMetrics.Has(container.CPUSetMetrics) {
+		c.containerMetrics = append(c.containerMetrics, containerMetric{
+			name:      "container_memory_migrate",
+			help:      "Memory migrate status.",
+			valueType: prometheus.GaugeValue,
+			getValues: func(s *info.ContainerStats) metricValues {
+				return metricValues{{value: float64(s.CpuSet.MemoryMigrate), timestamp: s.Timestamp}}
+			},
+		})
+	}
 	if includedMetrics.Has(container.MemoryNumaMetrics) {
 		c.containerMetrics = append(c.containerMetrics, []containerMetric{
 			{
@@ -757,6 +767,28 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				}, s.Timestamp)
 			},
 		},
+		{
+			name:        "container_blkio_device_usage_total",
+			help:        "Blkio Device bytes usage",
+			valueType:   prometheus.CounterValue,
+			extraLabels: []string{"device", "major", "minor", "operation"},
+			getValues: func(s *info.ContainerStats) metricValues {
+				var values metricValues
+				for _, diskStat := range s.DiskIo.IoServiceBytes {
+					for operation, value := range diskStat.Stats {
+						values = append(values, metricValue{
+							value: float64(value),
+							labels: []string{diskStat.Device,
+								strconv.Itoa(int(diskStat.Major)),
+								strconv.Itoa(int(diskStat.Minor)),
+								operation},
+							timestamp: s.Timestamp,
+						})
+					}
+				}
+				return values
+			},
+		},
 	}...)
 	}
 	if includedMetrics.Has(container.NetworkUsageMetrics) {
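The two additions register container_memory_migrate (a gauge from the cpuset controller) and container_blkio_device_usage_total (a counter with one series per device and operation). On a scrape these would surface as series shaped roughly like the following (label values illustrative, not from the source):

    container_memory_migrate{id="/docker/abc123"} 1
    container_blkio_device_usage_total{device="/dev/sdb",major="8",minor="0",operation="Read"} 3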
vendor/github.com/google/cadvisor/metrics/prometheus_fake.go (generated, vendored; 16 lines changed)

@@ -524,6 +524,21 @@ func (p testSubcontainersInfoProvider) GetRequestedContainersInfo(string, v2.Req
 					TxQueued: 0,
 				},
 			},
+			DiskIo: info.DiskIoStats{
+				IoServiceBytes: []info.PerDiskStats{{
+					Device: "/dev/sdb",
+					Major:  8,
+					Minor:  0,
+					Stats: map[string]uint64{
+						"Async":   1,
+						"Discard": 2,
+						"Read":    3,
+						"Sync":    4,
+						"Total":   5,
+						"Write":   6,
+					},
+				}},
+			},
 			Filesystem: []info.FsStats{
 				{
 					Device: "sda1",
@@ -708,6 +723,7 @@ func (p testSubcontainersInfoProvider) GetRequestedContainersInfo(string, v2.Req
 					},
 				},
 			},
+			CpuSet: info.CPUSetStats{MemoryMigrate: 1},
 		},
 	},
 },
vendor/github.com/google/cadvisor/resctrl/collector.go (generated, vendored; 12 lines changed)

@@ -26,19 +26,19 @@ import (
 )
 
 type collector struct {
-	resctrl intelrdt.IntelRdtManager
+	resctrl intelrdt.Manager
 	stats.NoopDestroy
 }
 
 func newCollector(id string, resctrlPath string) *collector {
 	collector := &collector{
-		resctrl: intelrdt.IntelRdtManager{
-			Config: &configs.Config{
+		resctrl: intelrdt.NewManager(
+			&configs.Config{
 				IntelRdt: &configs.IntelRdt{},
 			},
-			Id:   id,
-			Path: resctrlPath,
-		},
+			id,
+			resctrlPath,
+		),
 	}
 
 	return collector
vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go (generated, vendored; 8 lines changed)

@@ -28,7 +28,7 @@ import (
 var (
 	legacyContainerRegexp = regexp.MustCompile(`Task in (.*) killed as a result of limit of (.*)`)
 	// Starting in 5.0 linux kernels, the OOM message changed
-	containerRegexp = regexp.MustCompile(`oom-kill:constraint=(.*),nodemask=(.*),cpuset=(.*),mems_allowed=(.*),oom_memcg=(.*) (.*),task_memcg=(.*),task=(.*),pid=(.*),uid=(.*)`)
+	containerRegexp = regexp.MustCompile(`oom-kill:constraint=(.*),nodemask=(.*),cpuset=(.*),mems_allowed=(.*),oom_memcg=(.*),task_memcg=(.*),task=(.*),pid=(.*),uid=(.*)`)
 	lastLineRegexp  = regexp.MustCompile(`Killed process ([0-9]+) \((.+)\)`)
 	firstLineRegexp = regexp.MustCompile(`invoked oom-killer:`)
 )
@@ -76,15 +76,15 @@ func getContainerName(line string, currentOomInstance *OomInstance) (bool, error
 		// Fall back to the legacy format if it isn't found here.
 		return false, getLegacyContainerName(line, currentOomInstance)
 	}
-	currentOomInstance.ContainerName = parsedLine[7]
+	currentOomInstance.ContainerName = parsedLine[6]
 	currentOomInstance.VictimContainerName = parsedLine[5]
 	currentOomInstance.Constraint = parsedLine[1]
-	pid, err := strconv.Atoi(parsedLine[9])
+	pid, err := strconv.Atoi(parsedLine[8])
 	if err != nil {
 		return false, err
 	}
 	currentOomInstance.Pid = pid
-	currentOomInstance.ProcessName = parsedLine[8]
+	currentOomInstance.ProcessName = parsedLine[7]
 	return true, nil
 }
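Note on the oomparser fix: it is a capture-group index shift. The old pattern captured oom_memcg with two groups (`oom_memcg=(.*) (.*)`), so every later field was off by one once kernels settled on a single comma-separated token. With the corrected pattern, task_memcg is group 6, task is group 7, and pid is group 8. A hedged check against a sample 5.x-style kernel line (field values made up):

    package main

    import (
        "fmt"
        "regexp"
    )

    var containerRegexp = regexp.MustCompile(`oom-kill:constraint=(.*),nodemask=(.*),cpuset=(.*),mems_allowed=(.*),oom_memcg=(.*),task_memcg=(.*),task=(.*),pid=(.*),uid=(.*)`)

    func main() {
        line := "oom-kill:constraint=CONSTRAINT_MEMCG,nodemask=(null),cpuset=/,mems_allowed=0," +
            "oom_memcg=/kubepods/pod1,task_memcg=/kubepods/pod1/abc,task=stress,pid=123,uid=0"

        m := containerRegexp.FindStringSubmatch(line)
        fmt.Println("container:", m[6]) // task_memcg
        fmt.Println("process:  ", m[7]) // task
        fmt.Println("pid:      ", m[8])
    }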
vendor/github.com/google/cadvisor/utils/sysfs/sysfs.go (generated, vendored; 180 lines changed)

@@ -16,7 +16,6 @@ package sysfs
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -37,9 +36,21 @@ const (
 	ppcDevTree   = "/proc/device-tree"
 	s390xDevTree = "/etc" // s390/s390x changes
 
-	coreIDFilePath    = "/topology/core_id"
-	packageIDFilePath = "/topology/physical_package_id"
-	meminfoFile       = "meminfo"
+	meminfoFile = "meminfo"
+
+	sysFsCPUTopology = "topology"
+
+	// CPUPhysicalPackageID is a physical package id of cpu#. Typically corresponds to a physical socket number,
+	// but the actual value is architecture and platform dependent.
+	CPUPhysicalPackageID = "physical_package_id"
+	// CPUCoreID is the CPU core ID of cpu#. Typically it is the hardware platform's identifier
+	// (rather than the kernel's). The actual value is architecture and platform dependent.
+	CPUCoreID = "core_id"
+
+	coreIDFilePath    = "/" + sysFsCPUTopology + "/core_id"
+	packageIDFilePath = "/" + sysFsCPUTopology + "/physical_package_id"
+
 	// memory size calculations
 
 	cpuDirPattern  = "cpu*[0-9]"
 	nodeDirPattern = "node*[0-9]"
@@ -325,9 +336,9 @@ func (fs *realSysFs) GetSystemUUID() (string, error) {
 	if id, err := ioutil.ReadFile(path.Join(dmiDir, "id", "product_uuid")); err == nil {
 		return strings.TrimSpace(string(id)), nil
 	} else if id, err = ioutil.ReadFile(path.Join(ppcDevTree, "system-id")); err == nil {
-		return strings.TrimSpace(string(id)), nil
+		return strings.TrimSpace(strings.TrimRight(string(id), "\000")), nil
 	} else if id, err = ioutil.ReadFile(path.Join(ppcDevTree, "vm,uuid")); err == nil {
-		return strings.TrimSpace(string(id)), nil
+		return strings.TrimSpace(strings.TrimRight(string(id), "\000")), nil
 	} else if id, err = ioutil.ReadFile(path.Join(s390xDevTree, "machine-id")); err == nil {
 		return strings.TrimSpace(string(id)), nil
 	} else {
@@ -335,25 +346,152 @@ func (fs *realSysFs) GetSystemUUID() (string, error) {
 	}
 }
 
-func (fs *realSysFs) IsCPUOnline(dir string) bool {
-	cpuPath := fmt.Sprintf("%s/online", dir)
-	content, err := ioutil.ReadFile(cpuPath)
+func (fs *realSysFs) IsCPUOnline(cpuPath string) bool {
+	onlinePath, err := filepath.Abs(cpuPath + "/../online")
 	if err != nil {
-		pathErr, ok := err.(*os.PathError)
-		if ok {
-			if errors.Is(pathErr.Unwrap(), os.ErrNotExist) && isZeroCPU(dir) {
-				return true
-			}
-		}
-		klog.Warningf("unable to read %s: %s", cpuPath, err.Error())
+		klog.V(1).Infof("Unable to get absolute path for %s", cpuPath)
 		return false
 	}
-	trimmed := bytes.TrimSpace(content)
-	return len(trimmed) == 1 && trimmed[0] == 49
+
+	// Quick check to determine if file exists: if it does not then kernel CPU hotplug is disabled and all CPUs are online.
+	_, err = os.Stat(onlinePath)
+	if err != nil && os.IsNotExist(err) {
+		return true
+	}
+	if err != nil {
+		klog.V(1).Infof("Unable to stat %s: %s", onlinePath, err)
+	}
+
+	cpuID, err := getCPUID(cpuPath)
+	if err != nil {
+		klog.V(1).Infof("Unable to get CPU ID from path %s: %s", cpuPath, err)
+		return false
+	}
+
+	isOnline, err := isCPUOnline(onlinePath, cpuID)
+	if err != nil {
+		klog.V(1).Infof("Unable to get online CPUs list: %s", err)
+		return false
+	}
+	return isOnline
 }
 
-func isZeroCPU(dir string) bool {
-	regex := regexp.MustCompile("cpu([0-9]*)")
+func getCPUID(dir string) (uint16, error) {
+	regex := regexp.MustCompile("cpu([0-9]+)")
 	matches := regex.FindStringSubmatch(dir)
-	return len(matches) == 2 && matches[1] == "0"
+	if len(matches) == 2 {
+		id, err := strconv.Atoi(matches[1])
+		if err != nil {
+			return 0, err
+		}
+		return uint16(id), nil
+	}
+	return 0, fmt.Errorf("can't get CPU ID from %s", dir)
 }
+
+// isCPUOnline is copied from github.com/opencontainers/runc/libcontainer/cgroups/fs and modified to suite cAdvisor
+// needs as Apache 2.0 license allows.
+// It parses CPU list (such as: 0,3-5,10) into a struct that allows to determine quickly if CPU or particular ID is online.
+// see: https://github.com/opencontainers/runc/blob/ab27e12cebf148aa5d1ee3ad13d9fc7ae12bf0b6/libcontainer/cgroups/fs/cpuset.go#L45
+func isCPUOnline(path string, cpuID uint16) (bool, error) {
+	fileContent, err := ioutil.ReadFile(path)
+	if err != nil {
+		return false, err
+	}
+	if len(fileContent) == 0 {
+		return false, fmt.Errorf("%s found to be empty", path)
+	}
+
+	cpuList := strings.TrimSpace(string(fileContent))
+	for _, s := range strings.Split(cpuList, ",") {
+		splitted := strings.SplitN(s, "-", 3)
+		switch len(splitted) {
+		case 3:
+			return false, fmt.Errorf("invalid values in %s", path)
+		case 2:
+			min, err := strconv.ParseUint(splitted[0], 10, 16)
+			if err != nil {
+				return false, err
+			}
+			max, err := strconv.ParseUint(splitted[1], 10, 16)
+			if err != nil {
+				return false, err
+			}
+			if min > max {
+				return false, fmt.Errorf("invalid values in %s", path)
+			}
+			for i := min; i <= max; i++ {
+				if uint16(i) == cpuID {
+					return true, nil
+				}
+			}
+		case 1:
+			value, err := strconv.ParseUint(s, 10, 16)
+			if err != nil {
+				return false, err
+			}
+			if uint16(value) == cpuID {
+				return true, nil
+			}
+		}
+	}
+
+	return false, nil
+}
+
+// Looks for sysfs cpu path containing given CPU property, e.g. core_id or physical_package_id
+// and returns number of unique values of given property, exemplary usage: getting number of CPU physical cores
+func GetUniqueCPUPropertyCount(cpuBusPath string, propertyName string) int {
+	absCPUBusPath, err := filepath.Abs(cpuBusPath)
+	if err != nil {
+		klog.Errorf("Cannot make %s absolute", cpuBusPath)
+		return 0
+	}
+	pathPattern := absCPUBusPath + "/cpu*[0-9]"
+	sysCPUPaths, err := filepath.Glob(pathPattern)
+	if err != nil {
+		klog.Errorf("Cannot find files matching pattern (pathPattern: %s), number of unique %s set to 0", pathPattern, propertyName)
+		return 0
+	}
+	onlinePath, err := filepath.Abs(cpuBusPath + "/online")
+	if err != nil {
+		klog.V(1).Infof("Unable to get absolute path for %s", cpuBusPath+"/../online")
+		return 0
+	}
+
+	if err != nil {
+		klog.V(1).Infof("Unable to get online CPUs list: %s", err)
+		return 0
+	}
+	uniques := make(map[string]bool)
+	for _, sysCPUPath := range sysCPUPaths {
+		cpuID, err := getCPUID(sysCPUPath)
+		if err != nil {
+			klog.V(1).Infof("Unable to get CPU ID from path %s: %s", sysCPUPath, err)
+			return 0
+		}
+		isOnline, err := isCPUOnline(onlinePath, cpuID)
+		if err != nil && !os.IsNotExist(err) {
+			klog.V(1).Infof("Unable to determine CPU online state: %s", err)
+			continue
+		}
+		if !isOnline && !os.IsNotExist(err) {
+			continue
+		}
+		propertyPath := filepath.Join(sysCPUPath, sysFsCPUTopology, propertyName)
+		propertyVal, err := ioutil.ReadFile(propertyPath)
+		if err != nil {
+			klog.Warningf("Cannot open %s, assuming 0 for %s of CPU %d", propertyPath, propertyName, cpuID)
+			propertyVal = []byte("0")
+		}
+		packagePath := filepath.Join(sysCPUPath, sysFsCPUTopology, CPUPhysicalPackageID)
+		packageVal, err := ioutil.ReadFile(packagePath)
+		if err != nil {
+			klog.Warningf("Cannot open %s, assuming 0 %s of CPU %d", packagePath, CPUPhysicalPackageID, cpuID)
+			packageVal = []byte("0")
+
+		}
+		uniques[fmt.Sprintf("%s_%s", bytes.TrimSpace(propertyVal), bytes.TrimSpace(packageVal))] = true
+	}
+	return len(uniques)
+}
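Note on IsCPUOnline: it now resolves the shared /sys/devices/system/cpu/online list and checks membership of the CPU's numeric ID, instead of reading a per-CPU online file (which cpu0 often lacks). The list uses the kernel's comma/range syntax, e.g. "0,3-5,10". A hedged standalone sketch of the membership test, with function names of our own:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // cpuInList reports whether cpuID appears in a kernel CPU list such as "0,3-5,10",
    // following the same range handling as the vendored isCPUOnline.
    func cpuInList(list string, cpuID uint16) (bool, error) {
        for _, s := range strings.Split(strings.TrimSpace(list), ",") {
            parts := strings.SplitN(s, "-", 2)
            if len(parts) == 2 {
                lo, err := strconv.ParseUint(parts[0], 10, 16)
                if err != nil {
                    return false, err
                }
                hi, err := strconv.ParseUint(parts[1], 10, 16)
                if err != nil {
                    return false, err
                }
                if uint64(cpuID) >= lo && uint64(cpuID) <= hi {
                    return true, nil
                }
                continue
            }
            v, err := strconv.ParseUint(s, 10, 16)
            if err != nil {
                return false, err
            }
            if uint16(v) == cpuID {
                return true, nil
            }
        }
        return false, nil
    }

    func main() {
        online, _ := cpuInList("0,3-5,10", 4)
        fmt.Println("cpu4 online:", online) // true
    }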
vendor/github.com/google/cadvisor/utils/sysfs/sysfs_notx86.go (generated, vendored; new file, 19 lines)

@@ -0,0 +1,19 @@
+// +build !x86
+
+// Copyright 2021 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sysfs
+
+var isX86 = false
vendor/github.com/google/cadvisor/utils/sysfs/sysfs_x86.go (generated, vendored; new file, 19 lines)

@@ -0,0 +1,19 @@
+// +build x86
+
+// Copyright 2021 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sysfs
+
+var isX86 = true