Merge pull request #36767 from vishh/rename-cgroups-flags

Automatic merge from submit-queue

[kubelet] rename --cgroups-per-qos to --experimental-cgroups-per-qos

This reflects the true nature of the "cgroups per qos" feature, which is still experimental.

```release-note
 * Rename `--cgroups-per-qos` to `--experimental-cgroups-per-qos` in Kubelet
```
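
For context, kubelet flags like this one are registered via pflag. Below is a minimal, self-contained sketch of what the renamed registration could look like; the `KubeletFlags` struct and the usage string are illustrative stand-ins, not the kubelet's actual flag-binding code.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// KubeletFlags is an illustrative stand-in for the kubelet's config
// struct; only the field renamed by this PR is shown.
type KubeletFlags struct {
	ExperimentalCgroupsPerQOS bool
}

func main() {
	var c KubeletFlags
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
	// The flag now carries the "experimental-" prefix; the old
	// spelling --cgroups-per-qos is no longer registered.
	fs.BoolVar(&c.ExperimentalCgroupsPerQOS, "experimental-cgroups-per-qos",
		false, "Enable creation of QoS cgroup hierarchy (experimental).")
	if err := fs.Parse([]string{"--experimental-cgroups-per-qos=true"}); err != nil {
		panic(err)
	}
	fmt.Println(c.ExperimentalCgroupsPerQOS) // true
}
```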
Committed by Kubernetes Submit Queue (via GitHub) on 2016-11-14 17:35:19 -08:00
21 changed files with 38 additions and 38 deletions

```diff
@@ -1401,7 +1401,7 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 yyq153[55] = x.CloudProvider != ""
 yyq153[56] = x.CloudConfigFile != ""
 yyq153[57] = x.KubeletCgroups != ""
-yyq153[58] = x.CgroupsPerQOS != false
+yyq153[58] = x.ExperimentalCgroupsPerQOS != false
 yyq153[59] = x.CgroupDriver != ""
 yyq153[60] = x.RuntimeCgroups != ""
 yyq153[61] = x.SystemCgroups != ""
@@ -2647,7 +2647,7 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 _ = yym345
 if false {
 } else {
-r.EncodeBool(bool(x.CgroupsPerQOS))
+r.EncodeBool(bool(x.ExperimentalCgroupsPerQOS))
 }
 } else {
 r.EncodeBool(false)
@@ -2655,13 +2655,13 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 } else {
 if yyq153[58] {
 z.EncSendContainerState(codecSelfer_containerMapKey1234)
-r.EncodeString(codecSelferC_UTF81234, string("cgroupsPerQOS"))
+r.EncodeString(codecSelferC_UTF81234, string("experimentalCgroupsPerQOS"))
 z.EncSendContainerState(codecSelfer_containerMapValue1234)
 yym346 := z.EncBinary()
 _ = yym346
 if false {
 } else {
-r.EncodeBool(bool(x.CgroupsPerQOS))
+r.EncodeBool(bool(x.ExperimentalCgroupsPerQOS))
 }
 }
 }
@@ -4378,11 +4378,11 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 } else {
 x.KubeletCgroups = string(r.DecodeString())
 }
-case "cgroupsPerQOS":
+case "experimentalCgroupsPerQOS":
 if r.TryDecodeAsNil() {
-x.CgroupsPerQOS = false
+x.ExperimentalCgroupsPerQOS = false
 } else {
-x.CgroupsPerQOS = bool(r.DecodeBool())
+x.ExperimentalCgroupsPerQOS = bool(r.DecodeBool())
 }
 case "cgroupDriver":
 if r.TryDecodeAsNil() {
@@ -5807,9 +5807,9 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 }
 z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 if r.TryDecodeAsNil() {
-x.CgroupsPerQOS = false
+x.ExperimentalCgroupsPerQOS = false
 } else {
-x.CgroupsPerQOS = bool(r.DecodeBool())
+x.ExperimentalCgroupsPerQOS = bool(r.DecodeBool())
 }
 yyj649++
 if yyhl649 {
```
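
The generated codec changes above simply track the renamed struct field and its JSON key. A plain encoding/json sketch (a struct trimmed to the one field from this diff) shows the resulting wire-format change:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down v1alpha1 config: before this PR the key was
// "cgroupsPerQOS"; after it, "experimentalCgroupsPerQOS".
type kubeletConfig struct {
	ExperimentalCgroupsPerQOS *bool `json:"experimentalCgroupsPerQOS,omitempty"`
}

func main() {
	on := true
	b, err := json.Marshal(kubeletConfig{ExperimentalCgroupsPerQOS: &on})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"experimentalCgroupsPerQOS":true}
}
```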

```diff
@@ -294,7 +294,7 @@ type KubeletConfiguration struct {
 // And all Burstable and BestEffort pods are brought up under their
 // specific top level QoS cgroup.
 // +optional
-CgroupsPerQOS bool `json:"cgroupsPerQOS,omitempty"`
+ExperimentalCgroupsPerQOS bool `json:"experimentalCgroupsPerQOS,omitempty"`
 // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
 // +optional
 CgroupDriver string `json:"cgroupDriver,omitempty"`
```

```diff
@@ -204,8 +204,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 if obj.CertDirectory == "" {
 obj.CertDirectory = "/var/run/kubernetes"
 }
-if obj.CgroupsPerQOS == nil {
-obj.CgroupsPerQOS = boolVar(false)
+if obj.ExperimentalCgroupsPerQOS == nil {
+obj.ExperimentalCgroupsPerQOS = boolVar(false)
 }
 if obj.ContainerRuntime == "" {
 obj.ContainerRuntime = "docker"
@@ -391,9 +391,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 temp := int32(defaultIPTablesDropBit)
 obj.IPTablesDropBit = &temp
 }
-if obj.CgroupsPerQOS == nil {
+if obj.ExperimentalCgroupsPerQOS == nil {
 temp := false
-obj.CgroupsPerQOS = &temp
+obj.ExperimentalCgroupsPerQOS = &temp
 }
 if obj.CgroupDriver == "" {
 obj.CgroupDriver = "cgroupfs"
@@ -401,8 +401,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 // NOTE: this is for backwards compatibility with earlier releases where cgroup-root was optional.
 // if cgroups per qos is not enabled, and cgroup-root is not specified, we need to default to the
 // container runtime default and not default to the root cgroup.
-if obj.CgroupsPerQOS != nil {
-if *obj.CgroupsPerQOS {
+if obj.ExperimentalCgroupsPerQOS != nil {
+if *obj.ExperimentalCgroupsPerQOS {
 if obj.CgroupRoot == "" {
 obj.CgroupRoot = "/"
 }
```
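
The defaulting logic depends on the v1alpha1 field being a `*bool`, so that an unset value (nil) can be told apart from an explicit `false`. A standalone sketch of that pattern, reduced to the one field this PR touches:

```go
package main

import "fmt"

type config struct {
	ExperimentalCgroupsPerQOS *bool
}

// setDefaults mirrors the pattern above: only fill the field in when
// the user left it unset (nil), so an explicit "false" survives.
func setDefaults(obj *config) {
	if obj.ExperimentalCgroupsPerQOS == nil {
		temp := false
		obj.ExperimentalCgroupsPerQOS = &temp
	}
}

func main() {
	var c config
	setDefaults(&c)
	fmt.Println(*c.ExperimentalCgroupsPerQOS) // false
}
```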

```diff
@@ -355,7 +355,7 @@ type KubeletConfiguration struct {
 // And all Burstable and BestEffort pods are brought up under their
 // specific top level QoS cgroup.
 // +optional
-CgroupsPerQOS *bool `json:"cgroupsPerQOS,omitempty"`
+ExperimentalCgroupsPerQOS *bool `json:"experimentalCgroupsPerQOS,omitempty"`
 // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
 // +optional
 CgroupDriver string `json:"cgroupDriver,omitempty"`
```

```diff
@@ -330,7 +330,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfiguration(
 out.RuntimeCgroups = in.RuntimeCgroups
 out.SystemCgroups = in.SystemCgroups
 out.CgroupRoot = in.CgroupRoot
-if err := api.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
+if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
 return err
 }
 out.CgroupDriver = in.CgroupDriver
@@ -496,7 +496,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfiguration(
 out.CloudProvider = in.CloudProvider
 out.CloudConfigFile = in.CloudConfigFile
 out.KubeletCgroups = in.KubeletCgroups
-if err := api.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
+if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
 return err
 }
 out.CgroupDriver = in.CgroupDriver
```
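
These generated conversions bridge the versioned `*bool` and the internal plain `bool`. The `api.Convert_*` helpers belong to Kubernetes' conversion machinery; the stand-ins below are simplified illustrations of what they do, not their real signatures:

```go
package main

import "fmt"

// convertPointerBoolToBool mimics Convert_Pointer_bool_To_bool:
// a nil versioned value becomes the internal zero value (false).
func convertPointerBoolToBool(in *bool) bool {
	if in == nil {
		return false
	}
	return *in
}

// convertBoolToPointerBool mimics the reverse direction: the
// internal value always round-trips to a non-nil pointer.
func convertBoolToPointerBool(in bool) *bool {
	out := in
	return &out
}

func main() {
	fmt.Println(convertPointerBoolToBool(nil))   // false
	fmt.Println(*convertBoolToPointerBool(true)) // true
}
```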

```diff
@@ -302,12 +302,12 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error {
 out.RuntimeCgroups = in.RuntimeCgroups
 out.SystemCgroups = in.SystemCgroups
 out.CgroupRoot = in.CgroupRoot
-if in.CgroupsPerQOS != nil {
-in, out := &in.CgroupsPerQOS, &out.CgroupsPerQOS
+if in.ExperimentalCgroupsPerQOS != nil {
+in, out := &in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS
 *out = new(bool)
 **out = **in
 } else {
-out.CgroupsPerQOS = nil
+out.ExperimentalCgroupsPerQOS = nil
 }
 out.CgroupDriver = in.CgroupDriver
 out.ContainerRuntime = in.ContainerRuntime
```
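
The deep-copy pattern above allocates a fresh bool rather than copying the pointer, so the copy cannot alias the original. The same pattern in isolation (function name illustrative):

```go
package main

import "fmt"

// deepCopyBoolPtr mirrors the generated pattern: allocate a new bool
// and copy the value, so the copy does not alias the original.
func deepCopyBoolPtr(in *bool) *bool {
	if in == nil {
		return nil
	}
	out := new(bool)
	*out = *in
	return out
}

func main() {
	src := true
	dst := deepCopyBoolPtr(&src)
	src = false
	fmt.Println(*dst) // still true: the copy is independent
}
```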

```diff
@@ -308,7 +308,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error {
 out.CloudProvider = in.CloudProvider
 out.CloudConfigFile = in.CloudConfigFile
 out.KubeletCgroups = in.KubeletCgroups
-out.CgroupsPerQOS = in.CgroupsPerQOS
+out.ExperimentalCgroupsPerQOS = in.ExperimentalCgroupsPerQOS
 out.CgroupDriver = in.CgroupDriver
 out.RuntimeCgroups = in.RuntimeCgroups
 out.SystemCgroups = in.SystemCgroups
```

```diff
@@ -2600,7 +2600,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
 Format: "",
 },
 },
-"cgroupsPerQOS": {
+"experimentalCgroupsPerQOS": {
 SchemaProps: spec.SchemaProps{
 Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.",
 Type: []string{"boolean"},
@@ -14428,7 +14428,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
 Format: "",
 },
 },
-"cgroupsPerQOS": {
+"experimentalCgroupsPerQOS": {
 SchemaProps: spec.SchemaProps{
 Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.",
 Type: []string{"boolean"},
```

```diff
@@ -435,7 +435,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps
 nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
 os: kubeDeps.OSInterface,
 oomWatcher: oomWatcher,
-cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
+cgroupsPerQOS: kubeCfg.ExperimentalCgroupsPerQOS,
 cgroupRoot: kubeCfg.CgroupRoot,
 mounter: kubeDeps.Mounter,
 writer: kubeDeps.Writer,
```

```diff
@@ -138,7 +138,7 @@ func GetHollowKubeletConfig(
 c.EnableCustomMetrics = false
 c.EnableDebuggingHandlers = true
 c.EnableServer = true
-c.CgroupsPerQOS = false
+c.ExperimentalCgroupsPerQOS = false
 // hairpin-veth is used to allow hairpin packets. Note that this deviates from
 // what the "real" kubelet currently does, because there's no way to
 // set promiscuous mode on docker0.
```