deps: update runc to 1.1.0
This updates the vendored runc/libcontainer to 1.1.0, and google/cadvisor to a version updated for runc 1.1.0 (google/cadvisor#3048).

Changes under vendor/ were generated by (roughly):

    ./hack/pin-dependency.sh github.com/google/cadvisor v0.44.0
    ./hack/pin-dependency.sh github.com/opencontainers/runc v1.1.0
    ./hack/update-vendor.sh
    ./hack/lint-dependencies.sh   # and follow all of its recommendations
    ./hack/update-vendor.sh
    ./hack/update-internal-modules.sh
    ./hack/lint-dependencies.sh   # re-check everything again

Co-Authored-By: Kir Kolyshkin <kolyshkin@gmail.com>
This commit is contained in:
10
vendor/github.com/google/cadvisor/container/common/helpers.go
generated
vendored
10
vendor/github.com/google/cadvisor/container/common/helpers.go
generated
vendored
@@ -105,7 +105,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
|
||||
}
|
||||
|
||||
// CPU.
|
||||
cpuRoot, ok := getControllerPath(cgroupPaths, "cpu", cgroup2UnifiedMode)
|
||||
cpuRoot, ok := GetControllerPath(cgroupPaths, "cpu", cgroup2UnifiedMode)
|
||||
if ok {
|
||||
if utils.FileExists(cpuRoot) {
|
||||
if cgroup2UnifiedMode {
|
||||
@@ -152,7 +152,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
|
||||
|
||||
// Cpu Mask.
|
||||
// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
|
||||
cpusetRoot, ok := getControllerPath(cgroupPaths, "cpuset", cgroup2UnifiedMode)
|
||||
cpusetRoot, ok := GetControllerPath(cgroupPaths, "cpuset", cgroup2UnifiedMode)
|
||||
if ok {
|
||||
if utils.FileExists(cpusetRoot) {
|
||||
spec.HasCpu = true
|
||||
@@ -167,7 +167,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
|
||||
}
|
||||
|
||||
// Memory
|
||||
memoryRoot, ok := getControllerPath(cgroupPaths, "memory", cgroup2UnifiedMode)
|
||||
memoryRoot, ok := GetControllerPath(cgroupPaths, "memory", cgroup2UnifiedMode)
|
||||
if ok {
|
||||
if cgroup2UnifiedMode {
|
||||
if utils.FileExists(path.Join(memoryRoot, "memory.max")) {
|
||||
@@ -195,7 +195,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
|
||||
}
|
||||
|
||||
// Processes, read it's value from pids path directly
|
||||
pidsRoot, ok := getControllerPath(cgroupPaths, "pids", cgroup2UnifiedMode)
|
||||
pidsRoot, ok := GetControllerPath(cgroupPaths, "pids", cgroup2UnifiedMode)
|
||||
if ok {
|
||||
if utils.FileExists(pidsRoot) {
|
||||
spec.HasProcesses = true
|
||||
@@ -217,7 +217,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
func getControllerPath(cgroupPaths map[string]string, controllerName string, cgroup2UnifiedMode bool) (string, bool) {
|
||||
func GetControllerPath(cgroupPaths map[string]string, controllerName string, cgroup2UnifiedMode bool) (string, bool) {
|
||||
|
||||
ok := false
|
||||
path := ""
|
||||
|
4
vendor/github.com/google/cadvisor/container/libcontainer/handler.go
generated
vendored
4
vendor/github.com/google/cadvisor/container/libcontainer/handler.go
generated
vendored
@@ -35,6 +35,7 @@ import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/common"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
@@ -169,8 +170,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
|
||||
// file descriptors etc.) and not required a proper container's
|
||||
// root PID (systemd services don't have the root PID atm)
|
||||
if h.includedMetrics.Has(container.ProcessMetrics) {
|
||||
paths := h.cgroupManager.GetPaths()
|
||||
path, ok := paths["cpu"]
|
||||
path, ok := common.GetControllerPath(h.cgroupManager.GetPaths(), "cpu", cgroups.IsCgroup2UnifiedMode())
|
||||
if !ok {
|
||||
klog.V(4).Infof("Could not find cgroups CPU for container %d", h.pid)
|
||||
} else {
|
||||
|
11
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go
generated
vendored
11
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go
generated
vendored
@@ -157,13 +157,14 @@ func diskStatsCopy(blkioStats []cgroups.BlkioStatEntry) (stat []info.PerDiskStat
|
||||
}
|
||||
|
||||
func NewCgroupManager(name string, paths map[string]string) (cgroups.Manager, error) {
|
||||
config := &configs.Cgroup{
|
||||
Name: name,
|
||||
Resources: &configs.Resources{},
|
||||
}
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
path := paths[""]
|
||||
return fs2.NewManager(nil, path, false)
|
||||
return fs2.NewManager(config, path)
|
||||
}
|
||||
|
||||
config := configs.Cgroup{
|
||||
Name: name,
|
||||
}
|
||||
return fs.NewManager(&config, paths, false), nil
|
||||
return fs.NewManager(config, paths)
|
||||
}
|
||||
|
4
vendor/github.com/google/cadvisor/fs/fs.go
generated
vendored
4
vendor/github.com/google/cadvisor/fs/fs.go
generated
vendored
@@ -20,7 +20,6 @@ package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -59,9 +58,6 @@ const (
|
||||
// A pool for restricting the number of consecutive `du` and `find` tasks running.
|
||||
var pool = make(chan struct{}, maxConcurrentOps)
|
||||
|
||||
// ErrDeviceNotInPartitionsMap is the error resulting if a device could not be found in the partitions map.
|
||||
var ErrDeviceNotInPartitionsMap = errors.New("could not find device in cached partitions map")
|
||||
|
||||
func init() {
|
||||
for i := 0; i < maxConcurrentOps; i++ {
|
||||
releaseToken()
|
||||
|
9
vendor/github.com/google/cadvisor/fs/types.go
generated
vendored
9
vendor/github.com/google/cadvisor/fs/types.go
generated
vendored
@@ -86,8 +86,13 @@ type UsageInfo struct {
|
||||
Inodes uint64
|
||||
}
|
||||
|
||||
// ErrNoSuchDevice is the error indicating the requested device does not exist.
|
||||
var ErrNoSuchDevice = errors.New("cadvisor: no such device")
|
||||
var (
|
||||
// ErrNoSuchDevice is the error indicating the requested device does not exist.
|
||||
ErrNoSuchDevice = errors.New("cadvisor: no such device")
|
||||
|
||||
// ErrDeviceNotInPartitionsMap is the error resulting if a device could not be found in the partitions map.
|
||||
ErrDeviceNotInPartitionsMap = errors.New("could not find device in cached partitions map")
|
||||
)
|
||||
|
||||
type FsInfo interface {
|
||||
// Returns capacity and free space, in bytes, of all the ext2, ext3, ext4 filesystems on the host.
|
||||
|
3
vendor/github.com/google/cadvisor/manager/container.go
generated
vendored
3
vendor/github.com/google/cadvisor/manager/container.go
generated
vendored
@@ -64,6 +64,7 @@ type containerInfo struct {
|
||||
}
|
||||
|
||||
type containerData struct {
|
||||
oomEvents uint64
|
||||
handler container.ContainerHandler
|
||||
info containerInfo
|
||||
memoryCache *memory.InMemoryCache
|
||||
@@ -103,8 +104,6 @@ type containerData struct {
|
||||
|
||||
// resctrlCollector updates stats for resctrl controller.
|
||||
resctrlCollector stats.Collector
|
||||
|
||||
oomEvents uint64
|
||||
}
|
||||
|
||||
// jitter returns a time.Duration between duration and duration + maxFactor * duration,
|
||||
|
2
vendor/github.com/google/cadvisor/manager/manager.go
generated
vendored
2
vendor/github.com/google/cadvisor/manager/manager.go
generated
vendored
@@ -158,7 +158,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
klog.Warningf("Cannot detect current cgroup on cgroup v2")
|
||||
} else {
|
||||
selfContainer, err := cgroups.GetOwnCgroupPath("cpu")
|
||||
selfContainer, err = cgroups.GetOwnCgroup("cpu")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
2
vendor/github.com/google/cadvisor/resctrl/utils.go
generated
vendored
2
vendor/github.com/google/cadvisor/resctrl/utils.go
generated
vendored
@@ -77,7 +77,7 @@ var (
|
||||
|
||||
func Setup() error {
|
||||
var err error
|
||||
rootResctrl, err = intelrdt.GetIntelRdtPath(rootContainer)
|
||||
rootResctrl, err = intelrdt.Root()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize resctrl: %v", err)
|
||||
}
|
||||
|
Reference in New Issue
Block a user