Disable cgroups-per-qos flag until implementation is stabilized
parent 4ddfe172ce
commit fde285cd8f
@@ -150,7 +150,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.MarkDeprecated("system-container", "Use --system-cgroups instead. Will be removed in a future version.")
 	fs.StringVar(&s.SystemCgroups, "system-cgroups", s.SystemCgroups, "Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under `/`. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
-	fs.BoolVar(&s.CgroupsPerQOS, "cgroups-per-qos", s.CgroupsPerQOS, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
+	//fs.BoolVar(&s.CgroupsPerQOS, "cgroups-per-qos", s.CgroupsPerQOS, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
 	fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
 	fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
 	fs.DurationVar(&s.RuntimeRequestTimeout.Duration, "runtime-request-timeout", s.RuntimeRequestTimeout.Duration, "Timeout of all runtime requests except long running request - pull, logs, exec and attach. When timeout exceeded, kubelet will cancel the request, throw out an error and retry later. Default: 2m0s")
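Why commenting out the registration works as a kill switch: once the `BoolVar` call is gone, pflag no longer knows the flag at all, so a stale command line that still passes it fails at parse time rather than silently running without QoS cgroups. A minimal standalone sketch, not from this commit (the flag-set name and printed output are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Hypothetical stand-in for KubeletServer.AddFlags with the
	// cgroups-per-qos registration commented out, as in this commit.
	fs := pflag.NewFlagSet("kubelet", pflag.ContinueOnError)
	var cgroupRoot string
	fs.StringVar(&cgroupRoot, "cgroup-root", "", "Optional root cgroup to use for pods.")
	// fs.BoolVar(&cgroupsPerQOS, "cgroups-per-qos", false, "...") // disabled

	// An old invocation that still passes the flag now fails fast.
	err := fs.Parse([]string{"--cgroups-per-qos=true"})
	fmt.Println(err) // unknown flag: --cgroups-per-qos
}
```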
@@ -242,6 +242,8 @@ make test_e2e_node TEST_ARGS="--disable-kubenet=false" # disable kubenet
 
 For testing with the QoS Cgroup Hierarchy enabled, you can pass --cgroups-per-qos flag as an argument into Ginkgo using TEST_ARGS
 
+*Note: Disabled pending feature stabilization.*
+
 ```sh
 make test_e2e_node TEST_ARGS="--cgroups-per-qos=true"
 ```
@@ -167,7 +167,8 @@ func RegisterNodeFlags() {
 	// TODO(random-liu): Remove kubelet related flags when we move the kubelet start logic out of the test.
 	// TODO(random-liu): Find someway to get kubelet configuration, and automatic config and filter test based on the configuration.
 	flag.BoolVar(&TestContext.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet. (default false)")
-	flag.BoolVar(&TestContext.CgroupsPerQOS, "cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
+	// TODO: uncomment this when the flag is re-enabled in kubelet
+	//flag.BoolVar(&TestContext.CgroupsPerQOS, "cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
 	flag.StringVar(&TestContext.EvictionHard, "eviction-hard", "memory.available<250Mi", "The hard eviction thresholds. If set, pods get evicted when the specified resources drop below the thresholds.")
 	flag.StringVar(&TestContext.ManifestPath, "manifest-path", "", "The path to the static pod manifest file.")
 }
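The effect on the test context, sketched under the assumption that nothing else writes the field: `CgroupsPerQOS` survives as a struct member, but with the flag binding commented out it keeps Go's zero value, so every guarded test sees `false`. A minimal illustration with a hypothetical cut-down struct, not the real `TestContext`:

```go
package main

import (
	"flag"
	"fmt"
)

// testContext is a cut-down stand-in for the e2e TestContext.
type testContext struct {
	DisableKubenet bool
	CgroupsPerQOS  bool // field kept, flag binding disabled
}

func main() {
	ctx := testContext{}
	flag.BoolVar(&ctx.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet.")
	// flag.BoolVar(&ctx.CgroupsPerQOS, "cgroups-per-qos", false, "...") // disabled
	flag.Parse()
	// With no binding, CgroupsPerQOS is never set and stays false.
	fmt.Println("cgroups-per-qos:", ctx.CgroupsPerQOS)
}
```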
@@ -30,44 +30,46 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager [Skip]", func() {
 	Describe("QOS containers", func() {
 		Context("On enabling QOS cgroup hierarchy", func() {
 			It("Top level QoS containers should have been created", func() {
-				if framework.TestContext.CgroupsPerQOS {
-					podName := "qos-pod" + string(uuid.NewUUID())
-					contName := "qos-container" + string(uuid.NewUUID())
-					pod := &api.Pod{
-						ObjectMeta: api.ObjectMeta{
-							Name: podName,
-						},
-						Spec: api.PodSpec{
-							// Don't restart the Pod since it is expected to exit
-							RestartPolicy: api.RestartPolicyNever,
-							Containers: []api.Container{
-								{
-									Image: ImageRegistry[busyBoxImage],
-									Name: contName,
-									Command: []string{"sh", "-c", "if [ -d /tmp/memory/Burstable ] && [ -d /tmp/memory/BestEffort ]; then exit 0; else exit 1; fi"},
-									VolumeMounts: []api.VolumeMount{
-										{
-											Name: "sysfscgroup",
-											MountPath: "/tmp",
-										},
-									},
-								},
-							},
-							Volumes: []api.Volume{
-								{
-									Name: "sysfscgroup",
-									VolumeSource: api.VolumeSource{
-										HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
-									},
-								},
-							},
-						},
-					}
-					podClient := f.PodClient()
-					podClient.Create(pod)
-					err := framework.WaitForPodSuccessInNamespace(f.Client, podName, contName, f.Namespace.Name)
-					Expect(err).NotTo(HaveOccurred())
-				}
+				// return fast
+				if !framework.TestContext.CgroupsPerQOS {
+					return
+				}
+				podName := "qos-pod" + string(uuid.NewUUID())
+				contName := "qos-container" + string(uuid.NewUUID())
+				pod := &api.Pod{
+					ObjectMeta: api.ObjectMeta{
+						Name: podName,
+					},
+					Spec: api.PodSpec{
+						// Don't restart the Pod since it is expected to exit
+						RestartPolicy: api.RestartPolicyNever,
+						Containers: []api.Container{
+							{
+								Image: ImageRegistry[busyBoxImage],
+								Name: contName,
+								Command: []string{"sh", "-c", "if [ -d /tmp/memory/Burstable ] && [ -d /tmp/memory/BestEffort ]; then exit 0; else exit 1; fi"},
+								VolumeMounts: []api.VolumeMount{
+									{
+										Name: "sysfscgroup",
+										MountPath: "/tmp",
+									},
+								},
+							},
+						},
+						Volumes: []api.Volume{
+							{
+								Name: "sysfscgroup",
+								VolumeSource: api.VolumeSource{
+									HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
+								},
+							},
+						},
+					},
+				}
+				podClient := f.PodClient()
+				podClient.Create(pod)
+				err := framework.WaitForPodSuccessInNamespace(f.Client, podName, contName, f.Namespace.Name)
+				Expect(err).NotTo(HaveOccurred())
 			})
 		})
 	})
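Besides guarding on the disabled flag, this hunk flips the check from wrapping the body to an early return, which is why every body line shows as removed and re-added at one less indent. The shape of that refactor, as a tiny self-contained sketch:

```go
package main

import "fmt"

// runIfEnabled mirrors the refactored test: bail out first rather than
// nesting the whole body inside `if enabled { ... }`.
func runIfEnabled(enabled bool, body func()) {
	// return fast
	if !enabled {
		return
	}
	body()
}

func main() {
	for _, enabled := range []bool{false, true} {
		ran := false
		runIfEnabled(enabled, func() { ran = true })
		fmt.Printf("enabled=%v ran=%v\n", enabled, ran)
	}
}
```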
@@ -82,7 +82,8 @@ func (e *E2EServices) Start() error {
 		// out of the test.
 		"--node-name", framework.TestContext.NodeName,
 		"--disable-kubenet="+strconv.FormatBool(framework.TestContext.DisableKubenet),
-		"--cgroups-per-qos="+strconv.FormatBool(framework.TestContext.CgroupsPerQOS),
+		// TODO: enable when flag is introduced in 1.5
+		// "--cgroups-per-qos="+strconv.FormatBool(framework.TestContext.CgroupsPerQOS),
 		"--manifest-path", framework.TestContext.ManifestPath,
 		"--eviction-hard", framework.TestContext.EvictionHard,
 	)
@@ -365,9 +366,10 @@ func (es *e2eService) startKubeletServer() (*server, error) {
 		"--eviction-pressure-transition-period", "30s",
 	)
 	if framework.TestContext.CgroupsPerQOS {
-		cmdArgs = append(cmdArgs,
-			"--cgroups-per-qos", "true",
-		)
+		// TODO: enable this when the flag is stable and available in kubelet.
+		// cmdArgs = append(cmdArgs,
+		// 	"--cgroups-per-qos", "true",
+		// )
 	}
 	if !framework.TestContext.DisableKubenet {
 		cwd, err := os.Getwd()
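For reference, the conditional-append pattern that stays in place (with its body now commented out) looks like this in isolation; a hedged sketch in which a local boolean stands in for `framework.TestContext.CgroupsPerQOS`:

```go
package main

import "fmt"

func main() {
	cgroupsPerQOS := false // stand-in for framework.TestContext.CgroupsPerQOS
	cmdArgs := []string{"--eviction-pressure-transition-period", "30s"}
	if cgroupsPerQOS {
		// Disabled by this commit; once the flag is stable, the append
		// below is meant to be restored.
		cmdArgs = append(cmdArgs, "--cgroups-per-qos", "true")
	}
	fmt.Println(cmdArgs) // flag omitted while the feature is off
}
```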
@@ -5,4 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
 SETUP_NODE=false
-TEST_ARGS=--cgroups-per-qos=false
+# DISABLED --cgroups-per-qos flag until feature stabilized.
+#TEST_ARGS=--cgroups-per-qos=false
+TEST_ARGS=
@@ -5,4 +5,6 @@ GCE_PROJECT=k8s-jkns-pr-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
 SETUP_NODE=false
-TEST_ARGS=--cgroups-per-qos=false
+# DISABLED --cgroups-per-qos flag until feature stabilized.
+#TEST_ARGS=--cgroups-per-qos=false
+TEST_ARGS=
@@ -5,5 +5,7 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]"'
 SETUP_NODE=false
-TEST_ARGS=--cgroups-per-qos=false
+# DISABLED --cgroups-per-qos flag until feature stabilized.
+#TEST_ARGS=--cgroups-per-qos=false
+TEST_ARGS=
 PARALLELISM=1
@@ -18,4 +18,6 @@ CLEANUP=true
 # If true, current user will be added to the docker group on test node
 SETUP_NODE=false
 # If true QoS Cgroup Hierarchy is created and tests specifc to the cgroup hierarchy run
-TEST_ARGS=--cgroups-per-qos=false
+# DISABLED --cgroups-per-qos flag until feature stabilized.
+#TEST_ARGS=--cgroups-per-qos=false
+TEST_ARGS=
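All four Jenkins properties files make the same move: the explicit `--cgroups-per-qos=false` is commented out and `TEST_ARGS` is left empty, so the runner passes no extra Ginkgo arguments at all. A small sketch of why an empty value is equivalent to "no args", assuming shell-style word splitting of the variable (modeled here with `strings.Fields`):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, testArgs := range []string{"--cgroups-per-qos=false", ""} {
		extra := strings.Fields(testArgs) // "" splits into zero fields
		fmt.Printf("TEST_ARGS=%q -> %d extra arg(s) %v\n", testArgs, len(extra), extra)
	}
}
```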