Merge pull request #36767 from vishh/rename-cgroups-flags
Automatic merge from submit-queue [kubelet] rename --cgroups-per-qos to --experimental-cgroups-per-qos This reflects the true nature of "cgroups per qos" feature. ```release-note * Rename `--cgroups-per-qos` to `--experimental-cgroups-per-qos` in Kubelet ```
This commit is contained in:
@@ -224,7 +224,7 @@ func RegisterNodeFlags() {
|
||||
// TODO(random-liu): Find someway to get kubelet configuration, and automatic config and filter test based on the configuration.
|
||||
flag.BoolVar(&TestContext.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet. (default false)")
|
||||
flag.StringVar(&TestContext.EvictionHard, "eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", "The hard eviction thresholds. If set, pods get evicted when the specified resources drop below the thresholds.")
|
||||
flag.BoolVar(&TestContext.CgroupsPerQOS, "cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
|
||||
flag.BoolVar(&TestContext.CgroupsPerQOS, "experimental-cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
|
||||
flag.StringVar(&TestContext.CgroupDriver, "cgroup-driver", "", "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
|
||||
flag.StringVar(&TestContext.ManifestPath, "manifest-path", "", "The path to the static pod manifest file.")
|
||||
flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")
|
||||
|
||||
@@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]"'
|
||||
SETUP_NODE=false
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --cgroups-per-qos=true'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
|
||||
PARALLELISM=1
|
||||
|
||||
@@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
|
||||
SETUP_NODE=false
|
||||
TEST_ARGS=--cgroups-per-qos=true
|
||||
TEST_ARGS=--experimental-cgroups-per-qos=true
|
||||
TIMEOUT=1h
|
||||
|
||||
@@ -5,4 +5,4 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Flaky\]"'
|
||||
SETUP_NODE=false
|
||||
TEST_ARGS=--cgroups-per-qos=true
|
||||
TEST_ARGS=--experimental-cgroups-per-qos=true
|
||||
|
||||
@@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-pr-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
|
||||
SETUP_NODE=false
|
||||
TEST_ARGS=--cgroups-per-qos=true
|
||||
TEST_ARGS=--experimental-cgroups-per-qos=true
|
||||
|
||||
|
||||
@@ -5,6 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
|
||||
CLEANUP=true
|
||||
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
|
||||
SETUP_NODE=false
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --cgroups-per-qos=true'
|
||||
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
|
||||
PARALLELISM=1
|
||||
TIMEOUT=3h
|
||||
|
||||
@@ -18,4 +18,4 @@ CLEANUP=true
|
||||
# If true, current user will be added to the docker group on the test node
|
||||
SETUP_NODE=false
|
||||
# If true, QoS Cgroup Hierarchy is created and tests specific to the cgroup hierarchy run
|
||||
TEST_ARGS=--cgroups-per-qos=true
|
||||
TEST_ARGS=--experimental-cgroups-per-qos=true
|
||||
|
||||
@@ -223,7 +223,7 @@ func (e *E2EServices) startKubelet() (*server, error) {
|
||||
}
|
||||
if framework.TestContext.CgroupsPerQOS {
|
||||
cmdArgs = append(cmdArgs,
|
||||
"--cgroups-per-qos", "true",
|
||||
"--experimental-cgroups-per-qos", "true",
|
||||
"--cgroup-root", "/",
|
||||
)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user