
Automatic merge from submit-queue (batch tested with PRs 57973, 57990). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Set pids limit at pod level

**What this PR does / why we need it**: Adds a new alpha feature that sets a maximum number of pids per pod. This supports the use case where cluster administrators wish to limit the pids consumed per pod (for example, when running a CI system). By default, no maximum limit is set. An administrator who wants to enable this should set `SupportPodPidsLimit=true` in the kubelet's `--feature-gates=` parameter and specify the limit using the `--pod-max-pids` parameter. The limit is the total count of all processes running in all containers in the pod.

**Which issue(s) this PR fixes**: Fixes #43783

**Special notes for your reviewer**:

**Release note**:

```release-note
New alpha feature to limit the number of processes running in a pod. Cluster administrators can place limits by using the new kubelet command-line parameter --pod-max-pids. Note that since this is an alpha feature, they will need to enable the "SupportPodPidsLimit" feature gate.
```
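Under the hood, a per-pod pids ceiling of this kind is enforced by the kernel's pids cgroup controller. The sketch below is illustrative only: the helper name and cgroup path are hypothetical, not the kubelet's actual wiring, but it shows the mechanism — writing the limit into the pod-level cgroup's `pids.max` file.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
)

// applyPodPidsLimit writes a process-count ceiling into the pod cgroup's
// pids.max file. A limit <= 0 is treated as "no limit" ("max"), matching
// the default behavior described above.
func applyPodPidsLimit(podCgroupPath string, limit int64) error {
	value := "max" // pids.max accepts "max" (unlimited) or a positive integer
	if limit > 0 {
		value = strconv.FormatInt(limit, 10)
	}
	return os.WriteFile(filepath.Join(podCgroupPath, "pids.max"), []byte(value), 0644)
}

func main() {
	// Hypothetical pod cgroup path under the cgroup v1 pids hierarchy.
	if err := applyPodPidsLimit("/sys/fs/cgroup/pids/kubepods/pod-example", 1024); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```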
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cm

import (
	"time"

	"k8s.io/apimachinery/pkg/util/sets"
	// TODO: Migrate kubelet to either use its own internal objects or client library.
	"k8s.io/api/core/v1"
	internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
	"k8s.io/kubernetes/pkg/kubelet/config"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	"k8s.io/kubernetes/pkg/kubelet/status"
	"k8s.io/kubernetes/pkg/scheduler/schedulercache"

	"fmt"
	"strconv"
	"strings"
)

// ActivePodsFunc returns a list of pods that are active on the node.
type ActivePodsFunc func() []*v1.Pod

// ContainerManager manages the containers running on a machine.
type ContainerManager interface {
	// Start runs the container manager's housekeeping:
	// - ensures that the Docker daemon is in a container;
	// - creates the system container where all non-containerized processes run.
	Start(*v1.Node, ActivePodsFunc, config.SourcesReady, status.PodStatusProvider, internalapi.RuntimeService) error

	// SystemCgroupsLimit returns resources allocated to system cgroups in the machine.
	// These cgroups include the system and Kubernetes services.
	SystemCgroupsLimit() v1.ResourceList

	// GetNodeConfig returns a NodeConfig that is being used by the container manager.
	GetNodeConfig() NodeConfig

	// Status returns internal Status.
	Status() Status

	// NewPodContainerManager is a factory method which returns a PodContainerManager object.
	// It returns a noop implementation if the QoS cgroup hierarchy is not enabled.
	NewPodContainerManager() PodContainerManager

	// GetMountedSubsystems returns the mounted cgroup subsystems on the node.
	GetMountedSubsystems() *CgroupSubsystems

	// GetQOSContainersInfo returns the names of the top-level QoS containers.
	GetQOSContainersInfo() QOSContainersInfo

	// GetNodeAllocatableReservation returns the amount of compute resources that have to be reserved from scheduling.
	GetNodeAllocatableReservation() v1.ResourceList

	// GetCapacity returns the amount of compute resources tracked by the container manager available on the node.
	GetCapacity() v1.ResourceList

	// GetDevicePluginResourceCapacity returns the node capacity (amount of total device plugin resources),
	// node allocatable (amount of total healthy resources reported by device plugins),
	// and inactive device plugin resources previously registered on the node.
	GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string)

	// UpdateQOSCgroups performs housekeeping updates to ensure that the top-level
	// QoS containers have their desired state, in a thread-safe way.
	UpdateQOSCgroups() error

	// GetResources returns RunContainerOptions with the devices, mounts, and env fields
	// populated for extended resources required by the container.
	GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error)

	// UpdatePluginResources calls Allocate of the device plugin handler for potential
	// requests for device plugin resources, and returns an error if it fails.
	// Otherwise, it updates allocatableResource in nodeInfo if necessary, to make
	// sure it is at least equal to the pod's requested capacity for any registered
	// device plugin resource.
	UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error

	// InternalContainerLifecycle returns the hooks invoked at container lifecycle events.
	InternalContainerLifecycle() InternalContainerLifecycle
}

// NodeConfig holds the node-level configuration used by the container manager.
type NodeConfig struct {
	RuntimeCgroupsName    string
	SystemCgroupsName     string
	KubeletCgroupsName    string
	ContainerRuntime      string
	CgroupsPerQOS         bool
	CgroupRoot            string
	CgroupDriver          string
	KubeletRootDir        string
	ProtectKernelDefaults bool
	NodeAllocatableConfig
	ExperimentalQOSReserved               map[v1.ResourceName]int64
	ExperimentalCPUManagerPolicy          string
	ExperimentalCPUManagerReconcilePeriod time.Duration
	// ExperimentalPodPidsLimit is the maximum number of pids allowed per pod,
	// set via the kubelet's --pod-max-pids flag when the SupportPodPidsLimit
	// feature gate is enabled (alpha).
	ExperimentalPodPidsLimit int64
}
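
// newNodeConfigWithPidsLimit is an illustrative sketch, not part of the
// original file: it shows how a value from the kubelet's --pod-max-pids
// flag could be carried into NodeConfig when the SupportPodPidsLimit
// feature gate is enabled. The non-pids field values are arbitrary.
func newNodeConfigWithPidsLimit(podMaxPids int64) NodeConfig {
	return NodeConfig{
		CgroupsPerQOS:            true,
		CgroupDriver:             "cgroupfs",
		ExperimentalPodPidsLimit: podMaxPids, // total pids across all containers in the pod
	}
}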

// NodeAllocatableConfig holds the configuration for Node Allocatable enforcement.
type NodeAllocatableConfig struct {
	KubeReservedCgroupName   string
	SystemReservedCgroupName string
	EnforceNodeAllocatable   sets.String
	KubeReserved             v1.ResourceList
	SystemReserved           v1.ResourceList
	HardEvictionThresholds   []evictionapi.Threshold
}

// Status holds the internal status of the container manager.
type Status struct {
	// Any soft requirements that were unsatisfied.
	SoftRequirements error
}

// parsePercentage parses a percentage string (e.g. "50%") into its numeric value.
func parsePercentage(v string) (int64, error) {
	if !strings.HasSuffix(v, "%") {
		return 0, fmt.Errorf("percentage expected, got '%s'", v)
	}
	percentage, err := strconv.ParseInt(strings.TrimRight(v, "%"), 10, 0)
	if err != nil {
		return 0, fmt.Errorf("invalid number in percentage '%s'", v)
	}
	if percentage < 0 || percentage > 100 {
		return 0, fmt.Errorf("percentage must be between 0 and 100")
	}
	return percentage, nil
}
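
// demoParsePercentage is an illustrative helper, not part of the original
// file: it shows which inputs parsePercentage accepts and rejects.
func demoParsePercentage() {
	if p, err := parsePercentage("50%"); err == nil {
		fmt.Printf("parsed %d\n", p) // parsed 50
	}
	if _, err := parsePercentage("150%"); err != nil {
		fmt.Println(err) // percentage must be between 0 and 100
	}
	if _, err := parsePercentage("50"); err != nil {
		fmt.Println(err) // percentage expected, got '50'
	}
}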

// ParseQOSReserved parses the --qos-reserve-requests option.
func ParseQOSReserved(m map[string]string) (*map[v1.ResourceName]int64, error) {
	reservations := make(map[v1.ResourceName]int64)
	for k, v := range m {
		switch v1.ResourceName(k) {
		// Only memory resources are supported.
		case v1.ResourceMemory:
			q, err := parsePercentage(v)
			if err != nil {
				return nil, err
			}
			reservations[v1.ResourceName(k)] = q
		default:
			return nil, fmt.Errorf("cannot reserve %q resource", k)
		}
	}
	return &reservations, nil
}
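
// demoParseQOSReserved is an illustrative helper, not part of the original
// file: it shows how QoS reservation flag values are parsed. Only memory
// is accepted; any other resource name is rejected.
func demoParseQOSReserved() {
	reserved, err := ParseQOSReserved(map[string]string{"memory": "50%"})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("memory reservation: %d%%\n", (*reserved)[v1.ResourceMemory]) // memory reservation: 50%

	if _, err := ParseQOSReserved(map[string]string{"cpu": "50%"}); err != nil {
		fmt.Println(err) // cannot reserve "cpu" resource
	}
}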