Merge pull request #109029 from ehashman/runc-1.1.0
Dep bump to runc 1.1.0, cadvisor 0.44.0
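The substance of the bump, in one place: runc 1.1.0 ships a unified constructor, libcontainer/cgroups/manager.New, and this commit switches the kubelet to it, deleting the hand-rolled libcontainerAdapter removed in the first hunks below. A minimal sketch of the new entry point, not part of the diff; the cgroup path and flag values are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups/manager"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// Illustrative config: a cgroupfs-style path, no systemd management.
	cg := &configs.Cgroup{
		Path:      "/mycgroup",
		Systemd:   false,
		Resources: &configs.Resources{},
	}
	// manager.New inspects the config (and whether the host runs cgroup v2)
	// and returns the matching cgroups.Manager implementation.
	m, err := manager.New(cg)
	if err != nil {
		fmt.Println("creating cgroup manager:", err)
		return
	}
	fmt.Printf("manager paths: %v\n", m.GetPaths())
}

Because manager.New picks the fs, fs2, or systemd backend itself, the per-type switch that the kubelet used to carry (see the removed newManager below) can go away.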
@@ -28,9 +28,8 @@ import (
 	"time"
 
 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
-	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
-	cgroupfs2 "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
 	"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+	"github.com/opencontainers/runc/libcontainer/cgroups/manager"
 	cgroupsystemd "github.com/opencontainers/runc/libcontainer/cgroups/systemd"
 	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
 	"k8s.io/klog/v2"
@@ -42,14 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 )
 
-// libcontainerCgroupManagerType defines how to interface with libcontainer
-type libcontainerCgroupManagerType string
-
 const (
-	// libcontainerCgroupfs means use libcontainer with cgroupfs
-	libcontainerCgroupfs libcontainerCgroupManagerType = "cgroupfs"
-	// libcontainerSystemd means use libcontainer with systemd
-	libcontainerSystemd libcontainerCgroupManagerType = "systemd"
 	// systemdSuffix is the cgroup name suffix for systemd
 	systemdSuffix string = ".slice"
 	// MemoryMin is memory.min for cgroup v2
@@ -133,39 +125,6 @@ func IsSystemdStyleName(name string) bool {
 	return strings.HasSuffix(name, systemdSuffix)
 }
 
-// libcontainerAdapter provides a simplified interface to libcontainer based on libcontainer type.
-type libcontainerAdapter struct {
-	// cgroupManagerType defines how to interface with libcontainer
-	cgroupManagerType libcontainerCgroupManagerType
-}
-
-// newLibcontainerAdapter returns a configured libcontainerAdapter for specified manager.
-// it does any initialization required by that manager to function.
-func newLibcontainerAdapter(cgroupManagerType libcontainerCgroupManagerType) *libcontainerAdapter {
-	return &libcontainerAdapter{cgroupManagerType: cgroupManagerType}
-}
-
-// newManager returns an implementation of cgroups.Manager
-func (l *libcontainerAdapter) newManager(cgroups *libcontainerconfigs.Cgroup, paths map[string]string) (libcontainercgroups.Manager, error) {
-	switch l.cgroupManagerType {
-	case libcontainerCgroupfs:
-		if libcontainercgroups.IsCgroup2UnifiedMode() {
-			return cgroupfs2.NewManager(cgroups, paths["memory"], false)
-		}
-		return cgroupfs.NewManager(cgroups, paths, false), nil
-	case libcontainerSystemd:
-		// this means you asked systemd to manage cgroups, but systemd was not on the host, so all you can do is panic...
-		if !cgroupsystemd.IsRunningSystemd() {
-			panic("systemd cgroup manager not available")
-		}
-		if libcontainercgroups.IsCgroup2UnifiedMode() {
-			return cgroupsystemd.NewUnifiedManager(cgroups, paths["memory"], false), nil
-		}
-		return cgroupsystemd.NewLegacyManager(cgroups, paths), nil
-	}
-	return nil, fmt.Errorf("invalid cgroup manager configuration")
-}
-
 // CgroupSubsystems holds information about the mounted cgroup subsystems
 type CgroupSubsystems struct {
 	// Cgroup subsystem mounts.
@@ -180,13 +139,14 @@ type CgroupSubsystems struct {
 // cgroupManagerImpl implements the CgroupManager interface.
 // Its a stateless object which can be used to
 // update,create or delete any number of cgroups
-// It uses the Libcontainer raw fs cgroup manager for cgroup management.
+// It relies on runc/libcontainer cgroup managers.
 type cgroupManagerImpl struct {
 	// subsystems holds information about all the
 	// mounted cgroup subsystems on the node
 	subsystems *CgroupSubsystems
-	// simplifies interaction with libcontainer and its cgroup managers
-	adapter *libcontainerAdapter
+
+	// useSystemd tells if systemd cgroup manager should be used.
+	useSystemd bool
 }
 
 // Make sure that cgroupManagerImpl implements the CgroupManager interface
@@ -194,20 +154,16 @@ var _ CgroupManager = &cgroupManagerImpl{}
 
 // NewCgroupManager is a factory method that returns a CgroupManager
 func NewCgroupManager(cs *CgroupSubsystems, cgroupDriver string) CgroupManager {
-	managerType := libcontainerCgroupfs
-	if cgroupDriver == string(libcontainerSystemd) {
-		managerType = libcontainerSystemd
-	}
 	return &cgroupManagerImpl{
 		subsystems: cs,
-		adapter:    newLibcontainerAdapter(managerType),
+		useSystemd: cgroupDriver == "systemd",
 	}
 }
 
 // Name converts the cgroup to the driver specific value in cgroupfs form.
 // This always returns a valid cgroupfs path even when systemd driver is in use!
 func (m *cgroupManagerImpl) Name(name CgroupName) string {
-	if m.adapter.cgroupManagerType == libcontainerSystemd {
+	if m.useSystemd {
 		return name.ToSystemd()
 	}
 	return name.ToCgroupfs()
@@ -215,7 +171,7 @@ func (m *cgroupManagerImpl) Name(name CgroupName) string {
 
 // CgroupName converts the literal cgroupfs name on the host to an internal identifier.
 func (m *cgroupManagerImpl) CgroupName(name string) CgroupName {
-	if m.adapter.cgroupManagerType == libcontainerSystemd {
+	if m.useSystemd {
 		return ParseSystemdToCgroupName(name)
 	}
 	return ParseCgroupfsToCgroupName(name)
@@ -239,14 +195,16 @@ func (m *cgroupManagerImpl) buildCgroupUnifiedPath(name CgroupName) string {
 
 // libctCgroupConfig converts CgroupConfig to libcontainer's Cgroup config.
 func (m *cgroupManagerImpl) libctCgroupConfig(in *CgroupConfig, needResources bool) *libcontainerconfigs.Cgroup {
-	config := &libcontainerconfigs.Cgroup{}
+	config := &libcontainerconfigs.Cgroup{
+		Systemd: m.useSystemd,
+	}
 	if needResources {
 		config.Resources = m.toResources(in.ResourceParameters)
 	} else {
 		config.Resources = &libcontainerconfigs.Resources{}
 	}
 
-	if m.adapter.cgroupManagerType == libcontainerCgroupfs {
+	if !config.Systemd {
 		// For fs cgroup manager, we can either set Path or Name and Parent.
 		// Setting Path is easier.
 		config.Path = in.Name.ToCgroupfs()
@@ -335,11 +293,8 @@ func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error {
 		metrics.CgroupManagerDuration.WithLabelValues("destroy").Observe(metrics.SinceInSeconds(start))
 	}()
 
-	cgroupPaths := m.buildCgroupPaths(cgroupConfig.Name)
-
 	libcontainerCgroupConfig := m.libctCgroupConfig(cgroupConfig, false)
-
-	manager, err := m.adapter.newManager(libcontainerCgroupConfig, cgroupPaths)
+	manager, err := manager.New(libcontainerCgroupConfig)
 	if err != nil {
 		return err
 	}
@@ -463,7 +418,7 @@ func (m *cgroupManagerImpl) maybeSetHugetlb(resourceConfig *ResourceConfig, reso
 		pageSizes.Insert(sizeString)
 	}
 	// for each page size omitted, limit to 0
-	for _, pageSize := range cgroupfs.HugePageSizes {
+	for _, pageSize := range libcontainercgroups.HugePageSizes() {
 		if pageSizes.Has(pageSize) {
 			continue
 		}
@@ -482,16 +437,7 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
 	}()
 
 	libcontainerCgroupConfig := m.libctCgroupConfig(cgroupConfig, true)
-
-	unified := libcontainercgroups.IsCgroup2UnifiedMode()
-	var paths map[string]string
-	if unified {
-		libcontainerCgroupConfig.Path = m.Name(cgroupConfig.Name)
-	} else {
-		paths = m.buildCgroupPaths(cgroupConfig.Name)
-	}
-
-	manager, err := m.adapter.newManager(libcontainerCgroupConfig, paths)
+	manager, err := manager.New(libcontainerCgroupConfig)
 	if err != nil {
 		return fmt.Errorf("failed to create cgroup manager: %v", err)
 	}
@@ -506,9 +452,7 @@ func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
 	}()
 
 	libcontainerCgroupConfig := m.libctCgroupConfig(cgroupConfig, true)
-
-	// get the manager with the specified cgroup configuration
-	manager, err := m.adapter.newManager(libcontainerCgroupConfig, nil)
+	manager, err := manager.New(libcontainerCgroupConfig)
 	if err != nil {
 		return err
 	}
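End of the cgroup manager changes above: the systemd-versus-cgroupfs choice now travels inside the runc config itself (the Systemd field set by libctCgroupConfig) rather than through an adapter type. A hedged sketch of that pattern, using placeholder slice and path names that are not taken from this commit:

package main

import (
	"fmt"

	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/cgroups/manager"
	"github.com/opencontainers/runc/libcontainer/configs"
)

// newDriverManager mirrors the shape of libctCgroupConfig above: with the
// systemd driver runc expects Parent and Name as ".slice" units, while with
// cgroupfs setting Path alone is enough. The slice and path values below are
// placeholders only.
func newDriverManager(useSystemd bool) (libcontainercgroups.Manager, error) {
	cfg := &configs.Cgroup{
		Systemd:   useSystemd,
		Resources: &configs.Resources{},
	}
	if useSystemd {
		cfg.Parent = "kubepods.slice"
		cfg.Name = "kubepods-example.slice"
	} else {
		cfg.Path = "/kubepods/example"
	}
	return manager.New(cfg)
}

func main() {
	// The cgroupfs branch works without systemd on the host.
	m, err := newDriverManager(false)
	if err != nil {
		fmt.Println("creating manager:", err)
		return
	}
	fmt.Printf("cgroup paths: %v\n", m.GetPaths())
}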
@@ -30,8 +30,7 @@ import (
 	"time"
 
 	"github.com/opencontainers/runc/libcontainer/cgroups"
-	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
-	cgroupfs2 "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+	"github.com/opencontainers/runc/libcontainer/cgroups/manager"
 	"github.com/opencontainers/runc/libcontainer/configs"
 	"k8s.io/klog/v2"
 	"k8s.io/mount-utils"
@@ -380,13 +379,10 @@ func createManager(containerName string) (cgroups.Manager, error) {
 		Resources: &configs.Resources{
 			SkipDevices: true,
 		},
+		Systemd: false,
 	}
 
-	if cgroups.IsCgroup2UnifiedMode() {
-		return cgroupfs2.NewManager(cg, "", false)
-
-	}
-	return cgroupfs.NewManager(cg, nil, false), nil
+	return manager.New(cg)
 }
 
 type KernelTunableBehavior string
@@ -320,7 +320,7 @@ func NodeAllocatableRoot(cgroupRoot string, cgroupsPerQOS bool, cgroupDriver str
 	if cgroupsPerQOS {
 		nodeAllocatableRoot = NewCgroupName(nodeAllocatableRoot, defaultNodeAllocatableCgroupName)
 	}
-	if libcontainerCgroupManagerType(cgroupDriver) == libcontainerSystemd {
+	if cgroupDriver == "systemd" {
 		return nodeAllocatableRoot.ToSystemd()
 	}
 	return nodeAllocatableRoot.ToCgroupfs()
@@ -23,7 +23,7 @@ import (
 	"strings"
 	"testing"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
 
@@ -107,13 +107,15 @@ func TestIsCgroupPod(t *testing.T) {
 			qosContainersInfo: qosContainersInfo,
 		}
 		for _, testCase := range testCases {
-			// give the right cgroup structure based on driver
-			cgroupfs := testCase.input.ToCgroupfs()
+			// Give the right cgroup structure based on whether systemd is enabled.
+			var name string
 			if cgroupDriver == "systemd" {
-				cgroupfs = testCase.input.ToSystemd()
+				name = testCase.input.ToSystemd()
+			} else {
+				name = testCase.input.ToCgroupfs()
 			}
 			// check if this is a pod or not with the literal cgroupfs input
-			result, resultUID := pcm.IsPodCgroup(cgroupfs)
+			result, resultUID := pcm.IsPodCgroup(name)
 			if result != testCase.expectedResult {
 				t.Errorf("Unexpected result for driver: %v, input: %v, expected: %v, actual: %v", cgroupDriver, testCase.input, testCase.expectedResult, result)
 			}
@@ -23,14 +23,13 @@ import (
 	"sync"
 	"time"
 
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
 
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	units "github.com/docker/go-units"
 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
-	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
-	v1 "k8s.io/api/core/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/v1/resource"
 	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
@@ -147,7 +146,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
 // setHugePagesUnbounded ensures hugetlb is effectively unbounded
 func (m *qosContainerManagerImpl) setHugePagesUnbounded(cgroupConfig *CgroupConfig) error {
 	hugePageLimit := map[int64]int64{}
-	for _, pageSize := range cgroupfs.HugePageSizes {
+	for _, pageSize := range libcontainercgroups.HugePageSizes() {
 		pageSizeBytes, err := units.RAMInBytes(pageSize)
 		if err != nil {
 			return err
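The hunk above and the hunks that follow replace the removed cgroupfs.HugePageSizes package variable with runc 1.1.0's libcontainercgroups.HugePageSizes() helper, which probes the page sizes the running kernel supports. A small sketch of the "for each page size omitted, limit to 0" pattern used in these hunks, assuming the go-units parser; output depends on the host:

package main

import (
	"fmt"

	units "github.com/docker/go-units"
	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	// HugePageSizes() reports the page sizes supported on this host,
	// e.g. "2MB" and "1GB" on common x86_64 kernels.
	hugePageLimit := map[int64]int64{}
	for _, pageSize := range libcontainercgroups.HugePageSizes() {
		sizeBytes, err := units.RAMInBytes(pageSize)
		if err != nil {
			fmt.Println("skipping unparsable page size:", pageSize, err)
			continue
		}
		// A limit of 0 effectively forbids allocations of that page size,
		// matching the "limit to 0" comments in the surrounding hunks.
		hugePageLimit[sizeBytes] = 0
	}
	fmt.Println(hugePageLimit)
}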
@@ -24,7 +24,6 @@ import (
 	"time"
 
 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
-	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -170,7 +169,7 @@ func GetHugepageLimitsFromResources(resources v1.ResourceRequirements) []*runtim
 	var hugepageLimits []*runtimeapi.HugepageLimit
 
 	// For each page size, limit to 0.
-	for _, pageSize := range cgroupfs.HugePageSizes {
+	for _, pageSize := range libcontainercgroups.HugePageSizes() {
 		hugepageLimits = append(hugepageLimits, &runtimeapi.HugepageLimit{
 			PageSize: pageSize,
 			Limit:    uint64(0),
@@ -25,7 +25,7 @@ import (
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
-	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -366,7 +366,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) {
 	var baseHugepage []*runtimeapi.HugepageLimit
 
 	// For each page size, limit to 0.
-	for _, pageSize := range cgroupfs.HugePageSizes {
+	for _, pageSize := range libcontainercgroups.HugePageSizes() {
 		baseHugepage = append(baseHugepage, &runtimeapi.HugepageLimit{
 			PageSize: pageSize,
 			Limit:    uint64(0),
@@ -481,7 +481,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) {
 		machineHugepageSupport := true
 		for _, hugepageLimit := range test.expected {
 			hugepageSupport := false
-			for _, pageSize := range cgroupfs.HugePageSizes {
+			for _, pageSize := range libcontainercgroups.HugePageSizes() {
 				if pageSize == hugepageLimit.PageSize {
 					hugepageSupport = true
 					break