Merge pull request #108492 from kerthcet/feature/add-NodeInclustionPolicies

Add NodeInclusionPolicy to TopologySpreadConstraint in PodSpec

commit cc66198570

api/openapi-spec/swagger.json (generated, 10)
@@ -10328,8 +10328,16 @@
           "format": "int32",
           "type": "integer"
         },
+        "nodeAffinityPolicy": {
+          "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
+        "nodeTaintsPolicy": {
+          "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
         "topologyKey": {
-          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
+          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
           "type": "string"
         },
         "whenUnsatisfiable": {

@@ -7262,9 +7262,17 @@
           "format": "int32",
           "type": "integer"
         },
+        "nodeAffinityPolicy": {
+          "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
+        "nodeTaintsPolicy": {
+          "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
         "topologyKey": {
           "default": "",
-          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
+          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
           "type": "string"
         },
         "whenUnsatisfiable": {

@@ -4359,9 +4359,17 @@
           "format": "int32",
           "type": "integer"
         },
+        "nodeAffinityPolicy": {
+          "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
+        "nodeTaintsPolicy": {
+          "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
         "topologyKey": {
           "default": "",
-          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
+          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
           "type": "string"
         },
         "whenUnsatisfiable": {

@@ -3438,9 +3438,17 @@
           "format": "int32",
           "type": "integer"
         },
+        "nodeAffinityPolicy": {
+          "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
+        "nodeTaintsPolicy": {
+          "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
         "topologyKey": {
           "default": "",
-          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
+          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
           "type": "string"
         },
         "whenUnsatisfiable": {

@@ -3206,9 +3206,17 @@
           "format": "int32",
           "type": "integer"
         },
+        "nodeAffinityPolicy": {
+          "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
+        "nodeTaintsPolicy": {
+          "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+          "type": "string"
+        },
         "topologyKey": {
           "default": "",
-          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
+          "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
           "type": "string"
         },
         "whenUnsatisfiable": {
@@ -580,6 +580,7 @@ func dropDisabledFields(
 	}

 	dropDisabledTopologySpreadConstraintsFields(podSpec, oldPodSpec)
+	dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec)
 }

 // dropDisabledTopologySpreadConstraintsFields removes disabled fields from PodSpec related

@@ -647,6 +648,51 @@ func dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec *api.PodSpec) {
 	}
 }

+// dropDisabledNodeInclusionPolicyFields removes disabled fields from PodSpec related
+// to NodeInclusionPolicy only if it is not used by the old spec.
+func dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec *api.PodSpec) {
+	if !utilfeature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread) && podSpec != nil {
+		if !nodeTaintsPolicyInUse(oldPodSpec) {
+			for i := range podSpec.TopologySpreadConstraints {
+				podSpec.TopologySpreadConstraints[i].NodeTaintsPolicy = nil
+			}
+		}
+		if !nodeAffinityPolicyInUse(oldPodSpec) {
+			for i := range podSpec.TopologySpreadConstraints {
+				podSpec.TopologySpreadConstraints[i].NodeAffinityPolicy = nil
+			}
+		}
+	}
+}
+
+// nodeAffinityPolicyInUse returns true if the pod spec is non-nil and has NodeAffinityPolicy field set
+// in TopologySpreadConstraints
+func nodeAffinityPolicyInUse(podSpec *api.PodSpec) bool {
+	if podSpec == nil {
+		return false
+	}
+	for _, c := range podSpec.TopologySpreadConstraints {
+		if c.NodeAffinityPolicy != nil {
+			return true
+		}
+	}
+	return false
+}
+
+// nodeTaintsPolicyInUse returns true if the pod spec is non-nil and has NodeTaintsPolicy field set
+// in TopologySpreadConstraints
+func nodeTaintsPolicyInUse(podSpec *api.PodSpec) bool {
+	if podSpec == nil {
+		return false
+	}
+	for _, c := range podSpec.TopologySpreadConstraints {
+		if c.NodeTaintsPolicy != nil {
+			return true
+		}
+	}
+	return false
+}
+
 func ephemeralContainersInUse(podSpec *api.PodSpec) bool {
 	if podSpec == nil {
 		return false
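The helpers above follow the standard alpha-field compatibility pattern: the new pointer fields are cleared on writes only when the feature gate is off and the old object never set them, so objects that already use the fields keep round-tripping through updates. A minimal standalone sketch of that pattern, with simplified local types standing in for the real api package:

package main

import "fmt"

type Policy string

type Constraint struct {
	NodeTaintsPolicy *Policy // nil means the field is unset
}

type Spec struct {
	Constraints []Constraint
}

func inUse(s *Spec) bool {
	if s == nil {
		return false
	}
	for _, c := range s.Constraints {
		if c.NodeTaintsPolicy != nil {
			return true
		}
	}
	return false
}

// dropIfDisabled mirrors dropDisabledNodeInclusionPolicyFields: clear the
// field on the incoming spec only when the gate is off and the old spec
// never used it.
func dropIfDisabled(gateEnabled bool, spec, oldSpec *Spec) {
	if !gateEnabled && spec != nil && !inUse(oldSpec) {
		for i := range spec.Constraints {
			spec.Constraints[i].NodeTaintsPolicy = nil
		}
	}
}

func main() {
	honor := Policy("Honor")
	spec := &Spec{Constraints: []Constraint{{NodeTaintsPolicy: &honor}}}
	dropIfDisabled(false, spec, nil) // gate off, no prior use: field is dropped
	fmt.Println(spec.Constraints[0].NodeTaintsPolicy == nil) // true
}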
@@ -1768,3 +1768,247 @@ func TestDropOSField(t *testing.T) {
 		})
 	}
 }
+
+func TestDropNodeInclusionPolicyFields(t *testing.T) {
+	ignore := api.NodeInclusionPolicyIgnore
+	honor := api.NodeInclusionPolicyHonor
+
+	tests := []struct {
+		name        string
+		enabled     bool
+		podSpec     *api.PodSpec
+		oldPodSpec  *api.PodSpec
+		wantPodSpec *api.PodSpec
+	}{
+		{
+			name:    "feature disabled, both pods don't use the fields",
+			enabled: false,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+		},
+		{
+			name:    "feature disabled, only old pod use NodeAffinityPolicy field",
+			enabled: false,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeAffinityPolicy: &honor},
+				},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+		},
+		{
+			name:    "feature disabled, only old pod use NodeTaintsPolicy field",
+			enabled: false,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeTaintsPolicy: &ignore},
+				},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+		},
+		{
+			name:    "feature disabled, only current pod use NodeAffinityPolicy field",
+			enabled: false,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeAffinityPolicy: &honor},
+				},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{{
+					NodeAffinityPolicy: nil,
+				}},
+			},
+		},
+		{
+			name:    "feature disabled, only current pod use NodeTaintsPolicy field",
+			enabled: false,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeTaintsPolicy: &ignore},
+				},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeTaintsPolicy: nil},
+				},
+			},
+		},
+		{
+			name:    "feature disabled, both pods use NodeAffinityPolicy fields",
+			enabled: false,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeAffinityPolicy: &honor},
+				},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeAffinityPolicy: &ignore},
+				},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeAffinityPolicy: &ignore},
+				},
+			},
+		},
+		{
+			name:    "feature disabled, both pods use NodeTaintsPolicy fields",
+			enabled: false,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeTaintsPolicy: &ignore},
+				},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeTaintsPolicy: &honor},
+				},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{NodeTaintsPolicy: &honor},
+				},
+			},
+		},
+		{
+			name:    "feature enabled, both pods use the fields",
+			enabled: true,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeAffinityPolicy: &ignore,
+						NodeTaintsPolicy:   &honor,
+					},
+				},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeAffinityPolicy: &honor,
+						NodeTaintsPolicy:   &ignore,
+					},
+				},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeAffinityPolicy: &honor,
+						NodeTaintsPolicy:   &ignore,
+					},
+				},
+			},
+		},
+		{
+			name:    "feature enabled, only old pod use NodeAffinityPolicy field",
+			enabled: true,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeAffinityPolicy: &honor,
+					},
+				},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+		},
+		{
+			name:    "feature enabled, only old pod use NodeTaintsPolicy field",
+			enabled: true,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeTaintsPolicy: &ignore,
+					},
+				},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+		},
+		{
+			name:    "feature enabled, only current pod use NodeAffinityPolicy field",
+			enabled: true,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeAffinityPolicy: &ignore,
+					},
+				},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeAffinityPolicy: &ignore,
+					},
+				},
+			},
+		},
+		{
+			name:    "feature enabled, only current pod use NodeTaintsPolicy field",
+			enabled: true,
+			oldPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
+			},
+			podSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeTaintsPolicy: &honor,
+					},
+				},
+			},
+			wantPodSpec: &api.PodSpec{
+				TopologySpreadConstraints: []api.TopologySpreadConstraint{
+					{
+						NodeTaintsPolicy: &honor,
+					},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeInclusionPolicyInPodTopologySpread, test.enabled)()
+
+			dropDisabledFields(test.podSpec, nil, test.oldPodSpec, nil)
+			if diff := cmp.Diff(test.wantPodSpec, test.podSpec); diff != "" {
+				t.Errorf("unexpected pod spec (-want, +got):\n%s", diff)
+			}
+		})
+	}
+}
@@ -5616,6 +5616,17 @@ const (
 	ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway"
 )

+// NodeInclusionPolicy defines the type of node inclusion policy
+// +enum
+type NodeInclusionPolicy string
+
+const (
+	// NodeInclusionPolicyIgnore means ignore this scheduling directive when calculating pod topology spread skew.
+	NodeInclusionPolicyIgnore NodeInclusionPolicy = "Ignore"
+	// NodeInclusionPolicyHonor means use this scheduling policy when calculating pod topology spread skew.
+	NodeInclusionPolicyHonor NodeInclusionPolicy = "Honor"
+)
+
 // TopologySpreadConstraint specifies how to spread matching pods among the given topology.
 type TopologySpreadConstraint struct {
 	// MaxSkew describes the degree to which pods may be unevenly distributed.

@@ -5644,7 +5655,8 @@ type TopologySpreadConstraint struct {
 	// We consider each <key, value> as a "bucket", and try to put balanced number
 	// of pods into each bucket.
 	// We define a domain as a particular instance of a topology.
-	// Also, we define an eligible domain as a domain whose nodes match the node selector.
+	// Also, we define an eligible domain as a domain whose nodes meet the requirements of
+	// nodeAffinityPolicy and nodeTaintsPolicy.
 	// e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
 	// And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
 	// It's a required field.

@@ -5702,6 +5714,25 @@ type TopologySpreadConstraint struct {
 	// This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate.
 	// +optional
 	MinDomains *int32
+	// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+	// when calculating pod topology spread skew. Options are:
+	// - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+	// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+	//
+	// If this value is nil, the behavior is equivalent to the Honor policy.
+	// This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+	// +optional
+	NodeAffinityPolicy *NodeInclusionPolicy
+	// NodeTaintsPolicy indicates how we will treat node taints when calculating
+	// pod topology spread skew. Options are:
+	// - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+	// has a toleration, are included.
+	// - Ignore: node taints are ignored. All nodes are included.
+	//
+	// If this value is nil, the behavior is equivalent to the Ignore policy.
+	// This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+	// +optional
+	NodeTaintsPolicy *NodeInclusionPolicy
 }

 // These are the built-in errors for PortStatus.
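To see what the two policies change, it helps to model the eligible-domain computation directly. The toy program below is not scheduler code; the types and values are illustrative. It counts topology domains under Honor versus Ignore (recall the nil defaults are Honor for affinity and Ignore for taints, per the field docs above):

package main

import "fmt"

type node struct {
	zone            string
	matchesAffinity bool // node passes the pod's nodeAffinity/nodeSelector
	tainted         bool
	tolerated       bool // pod tolerates this node's taints
}

// eligibleZones mimics the NodeInclusionPolicy semantics: Honor filters
// nodes out before domains are counted, Ignore keeps every node in.
func eligibleZones(nodes []node, honorAffinity, honorTaints bool) map[string]bool {
	zones := map[string]bool{}
	for _, n := range nodes {
		if honorAffinity && !n.matchesAffinity {
			continue
		}
		if honorTaints && n.tainted && !n.tolerated {
			continue
		}
		zones[n.zone] = true
	}
	return zones
}

func main() {
	nodes := []node{
		{zone: "zone-1", matchesAffinity: true},
		{zone: "zone-2", matchesAffinity: false},               // dropped under Honor affinity
		{zone: "zone-3", matchesAffinity: true, tainted: true}, // dropped under Honor taints
	}
	fmt.Println(len(eligibleZones(nodes, true, true)))   // 1: only zone-1 survives
	fmt.Println(len(eligibleZones(nodes, false, false))) // 3: every node counts
}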
pkg/apis/core/v1/zz_generated.conversion.go (generated, 4)
@@ -7997,6 +7997,8 @@ func autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in
 	out.WhenUnsatisfiable = core.UnsatisfiableConstraintAction(in.WhenUnsatisfiable)
 	out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
 	out.MinDomains = (*int32)(unsafe.Pointer(in.MinDomains))
+	out.NodeAffinityPolicy = (*core.NodeInclusionPolicy)(unsafe.Pointer(in.NodeAffinityPolicy))
+	out.NodeTaintsPolicy = (*core.NodeInclusionPolicy)(unsafe.Pointer(in.NodeTaintsPolicy))
 	return nil
 }

@@ -8011,6 +8013,8 @@ func autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in
 	out.WhenUnsatisfiable = v1.UnsatisfiableConstraintAction(in.WhenUnsatisfiable)
 	out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
 	out.MinDomains = (*int32)(unsafe.Pointer(in.MinDomains))
+	out.NodeAffinityPolicy = (*v1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeAffinityPolicy))
+	out.NodeTaintsPolicy = (*v1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeTaintsPolicy))
 	return nil
 }
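The generated converters can reinterpret the pointers through unsafe.Pointer because v1.NodeInclusionPolicy and core.NodeInclusionPolicy are both defined as string and therefore share a memory layout, so no allocation or copy is needed. A toy illustration of the same pattern with local stand-in types:

package main

import (
	"fmt"
	"unsafe"
)

// A and B stand in for v1.NodeInclusionPolicy and core.NodeInclusionPolicy:
// distinct named types with the identical underlying type string.
type A string
type B string

func main() {
	honor := A("Honor")
	in := &honor
	out := (*B)(unsafe.Pointer(in)) // layout-identical, so the cast is safe
	fmt.Println(*out)               // Honor
}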
@@ -6490,6 +6490,12 @@ func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstrai
 			allErrs = append(allErrs, err)
 		}
 		allErrs = append(allErrs, validateMinDomains(subFldPath.Child("minDomains"), constraint.MinDomains, constraint.WhenUnsatisfiable)...)
+		if err := validateNodeInclusionPolicy(subFldPath.Child("nodeAffinityPolicy"), constraint.NodeAffinityPolicy); err != nil {
+			allErrs = append(allErrs, err)
+		}
+		if err := validateNodeInclusionPolicy(subFldPath.Child("nodeTaintsPolicy"), constraint.NodeTaintsPolicy); err != nil {
+			allErrs = append(allErrs, err)
+		}
 	}

 	return allErrs

@@ -6547,6 +6553,22 @@ func ValidateSpreadConstraintNotRepeat(fldPath *field.Path, constraint core.Topo
 	return nil
 }

+var (
+	supportedPodTopologySpreadNodePolicies = sets.NewString(string(core.NodeInclusionPolicyIgnore), string(core.NodeInclusionPolicyHonor))
+)
+
+// validateNodeInclusionPolicy tests that the argument is a valid NodeInclusionPolicy.
+func validateNodeInclusionPolicy(fldPath *field.Path, policy *core.NodeInclusionPolicy) *field.Error {
+	if policy == nil {
+		return nil
+	}
+
+	if !supportedPodTopologySpreadNodePolicies.Has(string(*policy)) {
+		return field.NotSupported(fldPath, policy, supportedPodTopologySpreadNodePolicies.List())
+	}
+	return nil
+}
+
 // ValidateServiceClusterIPsRelatedFields validates .spec.ClusterIPs,,
 // .spec.IPFamilies, .spec.ipFamilyPolicy. This is exported because it is used
 // during IP init and allocation.
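The validator follows the usual enum-validation shape: nil is allowed (it selects the default policy), anything else must be in the supported set. A standalone sketch of that shape, using the real sets and field packages from apimachinery; the local NodeInclusionPolicy type here is a stand-in:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

type NodeInclusionPolicy string

var supported = sets.NewString("Honor", "Ignore")

func validatePolicy(fldPath *field.Path, policy *NodeInclusionPolicy) *field.Error {
	if policy == nil {
		return nil // unset pointer falls back to the default policy
	}
	if !supported.Has(string(*policy)) {
		return field.NotSupported(fldPath, policy, supported.List())
	}
	return nil
}

func main() {
	bad := NodeInclusionPolicy("Unknown")
	path := field.NewPath("spec").Child("topologySpreadConstraints").Index(0).Child("nodeTaintsPolicy")
	fmt.Println(validatePolicy(path, &bad)) // prints an "Unsupported value" error with the field path
}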
@@ -18926,6 +18926,12 @@ func TestValidateTopologySpreadConstraints(t *testing.T) {
 	fieldPathTopologyKey := subFldPath0.Child("topologyKey")
 	fieldPathWhenUnsatisfiable := subFldPath0.Child("whenUnsatisfiable")
 	fieldPathTopologyKeyAndWhenUnsatisfiable := subFldPath0.Child("{topologyKey, whenUnsatisfiable}")
+	nodeAffinityField := subFldPath0.Child("nodeAffinityPolicy")
+	nodeTaintsField := subFldPath0.Child("nodeTaintsPolicy")
+	unknown := core.NodeInclusionPolicy("Unknown")
+	ignore := core.NodeInclusionPolicyIgnore
+	honor := core.NodeInclusionPolicyHonor
+
 	testCases := []struct {
 		name        string
 		constraints []core.TopologySpreadConstraint

@@ -19055,6 +19061,49 @@ func TestValidateTopologySpreadConstraints(t *testing.T) {
 				field.Duplicate(fieldPathTopologyKeyAndWhenUnsatisfiable, fmt.Sprintf("{%v, %v}", "k8s.io/zone", core.DoNotSchedule)),
 			},
 		},
+		{
+			name: "supported policy name set on NodeAffinityPolicy and NodeTaintsPolicy",
+			constraints: []core.TopologySpreadConstraint{
+				{
+					MaxSkew:            1,
+					TopologyKey:        "k8s.io/zone",
+					WhenUnsatisfiable:  core.DoNotSchedule,
+					NodeAffinityPolicy: &honor,
+					NodeTaintsPolicy:   &ignore,
+				},
+			},
+			wantFieldErrors: []*field.Error{},
+		},
+		{
+			name: "unsupported policy name set on NodeAffinityPolicy",
+			constraints: []core.TopologySpreadConstraint{
+				{
+					MaxSkew:            1,
+					TopologyKey:        "k8s.io/zone",
+					WhenUnsatisfiable:  core.DoNotSchedule,
+					NodeAffinityPolicy: &unknown,
+					NodeTaintsPolicy:   &ignore,
+				},
+			},
+			wantFieldErrors: []*field.Error{
+				field.NotSupported(nodeAffinityField, &unknown, supportedPodTopologySpreadNodePolicies.List()),
+			},
+		},
+		{
+			name: "unsupported policy name set on NodeTaintsPolicy",
+			constraints: []core.TopologySpreadConstraint{
+				{
+					MaxSkew:            1,
+					TopologyKey:        "k8s.io/zone",
+					WhenUnsatisfiable:  core.DoNotSchedule,
+					NodeAffinityPolicy: &honor,
+					NodeTaintsPolicy:   &unknown,
+				},
+			},
+			wantFieldErrors: []*field.Error{
+				field.NotSupported(nodeTaintsField, &unknown, supportedPodTopologySpreadNodePolicies.List()),
+			},
+		},
 	}

 	for _, tc := range testCases {
pkg/apis/core/zz_generated.deepcopy.go (generated, 10)
@@ -5639,6 +5639,16 @@ func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint)
 		*out = new(int32)
 		**out = **in
 	}
+	if in.NodeAffinityPolicy != nil {
+		in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy
+		*out = new(NodeInclusionPolicy)
+		**out = **in
+	}
+	if in.NodeTaintsPolicy != nil {
+		in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy
+		*out = new(NodeInclusionPolicy)
+		**out = **in
+	}
 	return
 }
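The generated deep copy allocates a fresh NodeInclusionPolicy for each non-nil pointer; a plain struct copy would alias the same value, so mutating one object would silently change the other. A toy demonstration of why the new-and-dereference pattern is needed:

package main

import "fmt"

type Policy string

type Constraint struct{ NodeTaintsPolicy *Policy }

func main() {
	honor := Policy("Honor")
	orig := Constraint{NodeTaintsPolicy: &honor}

	shallow := orig // struct copy: both fields point at the same Policy

	deep := Constraint{}
	if orig.NodeTaintsPolicy != nil {
		p := *orig.NodeTaintsPolicy // same new+deref pattern as DeepCopyInto
		deep.NodeTaintsPolicy = &p
	}

	*orig.NodeTaintsPolicy = "Ignore"
	fmt.Println(*shallow.NodeTaintsPolicy, *deep.NodeTaintsPolicy) // Ignore Honor
}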
@@ -803,6 +803,14 @@ const (
 	//
 	// Enables support for 'HostProcess' containers on Windows nodes.
 	WindowsHostProcessContainers featuregate.Feature = "WindowsHostProcessContainers"
+
+	// owner: @kerthcet
+	// kep: http://kep.k8s.io/3094
+	// alpha: v1.25
+	//
+	// Allow users to specify whether to take nodeAffinity/nodeTaint into consideration when
+	// calculating pod topology spread skew.
+	NodeInclusionPolicyInPodTopologySpread featuregate.Feature = "NodeInclusionPolicyInPodTopologySpread"
 )

 func init() {

@@ -1030,6 +1038,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

 	WindowsHostProcessContainers: {Default: true, PreRelease: featuregate.Beta},

+	NodeInclusionPolicyInPodTopologySpread: {Default: false, PreRelease: featuregate.Alpha},
+
 	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
 	// unintentionally on either side:
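Since the gate ships with Default: false at Alpha, nothing changes on a stock v1.25 cluster until an operator opts in; the standard mechanism is the component flag --feature-gates=NodeInclusionPolicyInPodTopologySpread=true. In-tree callers guard on the gate with the same check this PR adds to the drop logic; a minimal sketch of that guard (the package name is hypothetical, and a kubernetes-repo build context is assumed for the imports):

package featuredemo // hypothetical package, for illustration only

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// nodeInclusionPoliciesEnabled reports whether the alpha gate is on;
// callers skip reading NodeAffinityPolicy/NodeTaintsPolicy when it is off.
func nodeInclusionPoliciesEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread)
}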
pkg/generated/openapi/zz_generated.openapi.go (generated, 16)
@@ -25417,7 +25417,7 @@ func schema_k8sio_api_core_v1_TopologySpreadConstraint(ref common.ReferenceCallb
 					},
 					"topologyKey": {
 						SchemaProps: spec.SchemaProps{
-							Description: "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
+							Description: "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
 							Default:     "",
 							Type:        []string{"string"},
 							Format:      "",

@@ -25444,6 +25444,20 @@ func schema_k8sio_api_core_v1_TopologySpreadConstraint(ref common.ReferenceCallb
 							Format: "int32",
 						},
 					},
+					"nodeAffinityPolicy": {
+						SchemaProps: spec.SchemaProps{
+							Description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+							Type:        []string{"string"},
+							Format:      "",
+						},
+					},
+					"nodeTaintsPolicy": {
+						SchemaProps: spec.SchemaProps{
+							Description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+							Type:        []string{"string"},
+							Format:      "",
+						},
+					},
 				},
 				Required: []string{"maxSkew", "topologyKey", "whenUnsatisfiable"},
 			},
@@ -17,13 +17,13 @@ limitations under the License.
 package v1beta3

 import (
-	"k8s.io/kube-scheduler/config/v1beta3"
 	"testing"

 	"github.com/google/go-cmp/cmp"
 	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/featuregate"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/kube-scheduler/config/v1beta3"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
 	"k8s.io/utils/pointer"
 )
staging/src/k8s.io/api/core/v1/generated.pb.go (generated, 1879): file diff suppressed because it is too large.
@@ -5415,7 +5415,8 @@ message TopologySpreadConstraint {
   // We consider each <key, value> as a "bucket", and try to put balanced number
   // of pods into each bucket.
   // We define a domain as a particular instance of a topology.
-  // Also, we define an eligible domain as a domain whose nodes match the node selector.
+  // Also, we define an eligible domain as a domain whose nodes meet the requirements of
+  // nodeAffinityPolicy and nodeTaintsPolicy.
   // e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
   // And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
   // It's a required field.

@@ -5476,6 +5477,27 @@ message TopologySpreadConstraint {
   // This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate.
   // +optional
   optional int32 minDomains = 5;
+
+  // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+  // when calculating pod topology spread skew. Options are:
+  // - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+  // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+  //
+  // If this value is nil, the behavior is equivalent to the Honor policy.
+  // This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+  // +optional
+  optional string nodeAffinityPolicy = 6;
+
+  // NodeTaintsPolicy indicates how we will treat node taints when calculating
+  // pod topology spread skew. Options are:
+  // - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+  // has a toleration, are included.
+  // - Ignore: node taints are ignored. All nodes are included.
+  //
+  // If this value is nil, the behavior is equivalent to the Ignore policy.
+  // This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+  // +optional
+  optional string nodeTaintsPolicy = 7;
 }

 // TypedLocalObjectReference contains enough information to let you locate the
@@ -3339,6 +3339,17 @@ const (
 	ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway"
 )

+// NodeInclusionPolicy defines the type of node inclusion policy
+// +enum
+type NodeInclusionPolicy string
+
+const (
+	// NodeInclusionPolicyIgnore means ignore this scheduling policy when calculating pod topology spread skew.
+	NodeInclusionPolicyIgnore NodeInclusionPolicy = "Ignore"
+	// NodeInclusionPolicyHonor means use this scheduling policy when calculating pod topology spread skew.
+	NodeInclusionPolicyHonor NodeInclusionPolicy = "Honor"
+)
+
 // TopologySpreadConstraint specifies how to spread matching pods among the given topology.
 type TopologySpreadConstraint struct {
 	// MaxSkew describes the degree to which pods may be unevenly distributed.

@@ -3367,7 +3378,8 @@ type TopologySpreadConstraint struct {
 	// We consider each <key, value> as a "bucket", and try to put balanced number
 	// of pods into each bucket.
 	// We define a domain as a particular instance of a topology.
-	// Also, we define an eligible domain as a domain whose nodes match the node selector.
+	// Also, we define an eligible domain as a domain whose nodes meet the requirements of
+	// nodeAffinityPolicy and nodeTaintsPolicy.
 	// e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
 	// And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
 	// It's a required field.

@@ -3425,6 +3437,25 @@ type TopologySpreadConstraint struct {
 	// This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate.
 	// +optional
 	MinDomains *int32 `json:"minDomains,omitempty" protobuf:"varint,5,opt,name=minDomains"`
+	// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+	// when calculating pod topology spread skew. Options are:
+	// - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+	// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+	//
+	// If this value is nil, the behavior is equivalent to the Honor policy.
+	// This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+	// +optional
+	NodeAffinityPolicy *NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty" protobuf:"bytes,6,opt,name=nodeAffinityPolicy"`
+	// NodeTaintsPolicy indicates how we will treat node taints when calculating
+	// pod topology spread skew. Options are:
+	// - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+	// has a toleration, are included.
+	// - Ignore: node taints are ignored. All nodes are included.
+	//
+	// If this value is nil, the behavior is equivalent to the Ignore policy.
+	// This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+	// +optional
+	NodeTaintsPolicy *NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty" protobuf:"bytes,7,opt,name=nodeTaintsPolicy"`
 }

 const (
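From a client's perspective the feature is just two optional pointer fields on the v1 TopologySpreadConstraint. A sketch of a pod spec that honors the pod's node affinity and explicitly keeps the default taint behavior (the pod name and labels are illustrative; the policy constants and fields come from this PR):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	honor := v1.NodeInclusionPolicyHonor
	ignore := v1.NodeInclusionPolicyIgnore

	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Labels: map[string]string{"app": "demo"}},
		Spec: v1.PodSpec{
			TopologySpreadConstraints: []v1.TopologySpreadConstraint{{
				MaxSkew:           1,
				TopologyKey:       "topology.kubernetes.io/zone",
				WhenUnsatisfiable: v1.DoNotSchedule,
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "demo"},
				},
				// New in this PR: both fields are pointers; leaving them nil
				// keeps the defaults (Honor for affinity, Ignore for taints).
				NodeAffinityPolicy: &honor,
				NodeTaintsPolicy:   &ignore,
			}},
		},
	}
	fmt.Printf("%+v\n", pod.Spec.TopologySpreadConstraints[0])
}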
@@ -2400,12 +2400,14 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string {
 }

 var map_TopologySpreadConstraint = map[string]string{
-	"":                  "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
-	"maxSkew":           "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. ",
-	"topologyKey":       "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
-	"whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n  but giving higher precedence to topologies that would help reduce the\n  skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
-	"labelSelector":     "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
-	"minDomains":        "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ",
+	"":                   "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
+	"maxSkew":            "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. ",
+	"topologyKey":        "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
+	"whenUnsatisfiable":  "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n  but giving higher precedence to topologies that would help reduce the\n  skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
+	"labelSelector":      "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
+	"minDomains":         "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ",
+	"nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+	"nodeTaintsPolicy":   "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
 }

 func (TopologySpreadConstraint) SwaggerDoc() map[string]string {
@@ -5654,6 +5654,16 @@ func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint)
 		*out = new(int32)
 		**out = **in
 	}
+	if in.NodeAffinityPolicy != nil {
+		in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy
+		*out = new(NodeInclusionPolicy)
+		**out = **in
+	}
+	if in.NodeTaintsPolicy != nil {
+		in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy
+		*out = new(NodeInclusionPolicy)
+		**out = **in
+	}
 	return
 }
@ -1614,7 +1614,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -848,6 +848,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1615,7 +1615,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -856,6 +856,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1616,7 +1616,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -848,6 +848,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1615,7 +1615,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -854,6 +854,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1615,7 +1615,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -858,6 +858,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1615,7 +1615,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -854,6 +854,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1614,7 +1614,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -848,6 +848,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1615,7 +1615,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -856,6 +856,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1616,7 +1616,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -848,6 +848,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1615,7 +1615,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -854,6 +854,8 @@ spec:
|
||||
matchLabelsKey: matchLabelsValue
|
||||
maxSkew: 1
|
||||
minDomains: 5
|
||||
nodeAffinityPolicy: nodeAffinityPolicyValue
|
||||
nodeTaintsPolicy: nodeTaintsPolicyValue
|
||||
topologyKey: topologyKeyValue
|
||||
whenUnsatisfiable: whenUnsatisfiableValue
|
||||
volumes:
|
||||
|
@ -1668,7 +1668,9 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"minDomains": 5
|
||||
"minDomains": 5,
|
||||
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
|
||||
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
|
||||
}
|
||||
],
|
||||
"setHostnameAsFQDN": true,
|
||||
|
Binary file not shown.
@ -889,6 +889,8 @@ spec:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1619,7 +1619,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
BIN staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb (vendored)
Binary file not shown.
@ -853,6 +853,8 @@ spec:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1668,7 +1668,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
Binary file not shown.
@ -889,6 +889,8 @@ spec:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1662,7 +1662,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
Binary file not shown.
@ -886,6 +886,8 @@ template:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1556,7 +1556,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
BIN staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.pb (vendored)
Binary file not shown.
@ -804,6 +804,8 @@ spec:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1599,7 +1599,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
Binary file not shown.
@ -837,6 +837,8 @@ template:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1605,7 +1605,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
Binary file not shown.
@ -842,6 +842,8 @@ spec:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1614,7 +1614,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
Binary file not shown.
@ -848,6 +848,8 @@ spec:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1615,7 +1615,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
Binary file not shown.
@ -858,6 +858,8 @@ spec:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:

@ -1616,7 +1616,9 @@
}
]
},
"minDomains": 5
"minDomains": 5,
"nodeAffinityPolicy": "nodeAffinityPolicyValue",
"nodeTaintsPolicy": "nodeTaintsPolicyValue"
}
],
"setHostnameAsFQDN": true,
Binary file not shown.
@ -848,6 +848,8 @@ spec:
matchLabelsKey: matchLabelsValue
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: nodeAffinityPolicyValue
nodeTaintsPolicy: nodeTaintsPolicyValue
topologyKey: topologyKeyValue
whenUnsatisfiable: whenUnsatisfiableValue
volumes:
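All of the fixture updates above are the same two additions replayed across each workload type's serialized PodSpec. A hedged sketch of round-tripping such a fragment, where the use of sigs.k8s.io/yaml is an assumption about tooling rather than part of this diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/yaml"
)

// main unmarshals a constraint fragment shaped like the fixtures above and
// prints the parsed policies. Values here replace the fixture placeholders
// with the real enum strings.
func main() {
	data := []byte(`
maxSkew: 1
minDomains: 5
nodeAffinityPolicy: Honor
nodeTaintsPolicy: Ignore
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
`)
	var c corev1.TopologySpreadConstraint
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(*c.NodeAffinityPolicy, *c.NodeTaintsPolicy) // Honor Ignore
}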
@ -26,11 +26,13 @@ import (
// TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the TopologySpreadConstraint type for use
// with apply.
type TopologySpreadConstraintApplyConfiguration struct {
	MaxSkew           *int32                                  `json:"maxSkew,omitempty"`
	TopologyKey       *string                                 `json:"topologyKey,omitempty"`
	WhenUnsatisfiable *v1.UnsatisfiableConstraintAction       `json:"whenUnsatisfiable,omitempty"`
	LabelSelector     *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
	MinDomains        *int32                                  `json:"minDomains,omitempty"`
	MaxSkew            *int32                                  `json:"maxSkew,omitempty"`
	TopologyKey        *string                                 `json:"topologyKey,omitempty"`
	WhenUnsatisfiable  *v1.UnsatisfiableConstraintAction       `json:"whenUnsatisfiable,omitempty"`
	LabelSelector      *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
	MinDomains         *int32                                  `json:"minDomains,omitempty"`
	NodeAffinityPolicy *v1.NodeInclusionPolicy                 `json:"nodeAffinityPolicy,omitempty"`
	NodeTaintsPolicy   *v1.NodeInclusionPolicy                 `json:"nodeTaintsPolicy,omitempty"`
}

// TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the TopologySpreadConstraint type for use with

@ -78,3 +80,19 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32)
	b.MinDomains = &value
	return b
}

// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call.
func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
	b.NodeAffinityPolicy = &value
	return b
}

// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call.
func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
	b.NodeTaintsPolicy = &value
	return b
}
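The generated setters compose with the rest of the builder. A hedged usage sketch: the corev1/corev1ac import aliases are assumptions, while the With* methods are exactly the ones defined above.

package main

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// buildConstraint chains the generated setters, including the two added here.
func buildConstraint() *corev1ac.TopologySpreadConstraintApplyConfiguration {
	return corev1ac.TopologySpreadConstraint().
		WithMaxSkew(1).
		WithTopologyKey("kubernetes.io/hostname").
		WithWhenUnsatisfiable(corev1.ScheduleAnyway).
		WithNodeAffinityPolicy(corev1.NodeInclusionPolicyHonor).
		WithNodeTaintsPolicy(corev1.NodeInclusionPolicyHonor)
}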
@ -6865,6 +6865,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: minDomains
  type:
    scalar: numeric
- name: nodeAffinityPolicy
  type:
    scalar: string
- name: nodeTaintsPolicy
  type:
    scalar: string
- name: topologyKey
  type:
    scalar: string
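These schema entries type the new fields as scalar strings for structured-merge-diff, which lets server-side apply track ownership of each policy independently. A sketch of applying a Pod that sets one of them; clientset construction is elided, and the names and image are illustrative:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyPod server-side-applies a Pod whose spread constraint honors node
// taints when counting domains. Illustrative sketch only.
func applyPod(ctx context.Context, cs kubernetes.Interface) error {
	pod := corev1ac.Pod("spread-demo", "default").
		WithSpec(corev1ac.PodSpec().
			WithContainers(corev1ac.Container().
				WithName("app").
				WithImage("registry.k8s.io/pause:3.9")).
			WithTopologySpreadConstraints(corev1ac.TopologySpreadConstraint().
				WithMaxSkew(1).
				WithTopologyKey("topology.kubernetes.io/zone").
				WithWhenUnsatisfiable(corev1.DoNotSchedule).
				WithNodeTaintsPolicy(corev1.NodeInclusionPolicyHonor)))
	_, err := cs.CoreV1().Pods("default").Apply(ctx, pod, metav1.ApplyOptions{FieldManager: "spread-demo"})
	return err
}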