feat: add NodeInclusionPolicy to TopologySpreadConstraint in PodSpec
Signed-off-by: kerthcet <kerthcet@gmail.com>
@@ -580,6 +580,7 @@ func dropDisabledFields(
	}

	dropDisabledTopologySpreadConstraintsFields(podSpec, oldPodSpec)
	dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec)
}

// dropDisabledTopologySpreadConstraintsFields removes disabled fields from PodSpec related
@@ -647,6 +648,51 @@ func dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec *api.PodSpec) {
	}
}

// dropDisabledNodeInclusionPolicyFields removes disabled fields from PodSpec related
// to NodeInclusionPolicy only if they are not used by the old spec.
func dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec *api.PodSpec) {
	if !utilfeature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread) && podSpec != nil {
		if !nodeTaintsPolicyInUse(oldPodSpec) {
			for i := range podSpec.TopologySpreadConstraints {
				podSpec.TopologySpreadConstraints[i].NodeTaintsPolicy = nil
			}
		}
		if !nodeAffinityPolicyInUse(oldPodSpec) {
			for i := range podSpec.TopologySpreadConstraints {
				podSpec.TopologySpreadConstraints[i].NodeAffinityPolicy = nil
			}
		}
	}
}

// nodeAffinityPolicyInUse returns true if the pod spec is non-nil and has the NodeAffinityPolicy
// field set in TopologySpreadConstraints.
func nodeAffinityPolicyInUse(podSpec *api.PodSpec) bool {
	if podSpec == nil {
		return false
	}
	for _, c := range podSpec.TopologySpreadConstraints {
		if c.NodeAffinityPolicy != nil {
			return true
		}
	}
	return false
}

// nodeTaintsPolicyInUse returns true if the pod spec is non-nil and has the NodeTaintsPolicy
// field set in TopologySpreadConstraints.
func nodeTaintsPolicyInUse(podSpec *api.PodSpec) bool {
	if podSpec == nil {
		return false
	}
	for _, c := range podSpec.TopologySpreadConstraints {
		if c.NodeTaintsPolicy != nil {
			return true
		}
	}
	return false
}

func ephemeralContainersInUse(podSpec *api.PodSpec) bool {
	if podSpec == nil {
		return false
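A minimal usage sketch (not part of the commit) of the drop helper above, using only names defined in this file:

    honor := api.NodeInclusionPolicyHonor
    spec := &api.PodSpec{
        TopologySpreadConstraints: []api.TopologySpreadConstraint{
            {NodeAffinityPolicy: &honor},
        },
    }
    // With the feature gate disabled and no old spec using the policies,
    // the new fields are cleared before the object is persisted.
    dropDisabledNodeInclusionPolicyFields(spec, nil)
    // spec.TopologySpreadConstraints[0].NodeAffinityPolicy is now nil.

On update, an oldPodSpec that already sets either policy keeps the corresponding field, so existing objects round-trip even with the gate off.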
@@ -1768,3 +1768,247 @@ func TestDropOSField(t *testing.T) {
		}
	}
}

func TestDropNodeInclusionPolicyFields(t *testing.T) {
	ignore := api.NodeInclusionPolicyIgnore
	honor := api.NodeInclusionPolicyHonor

	tests := []struct {
		name        string
		enabled     bool
		podSpec     *api.PodSpec
		oldPodSpec  *api.PodSpec
		wantPodSpec *api.PodSpec
	}{
		{
			name:    "feature disabled, neither pod uses the fields",
			enabled: false,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
		},
		{
			name:    "feature disabled, only old pod uses the NodeAffinityPolicy field",
			enabled: false,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: &honor},
				},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
		},
		{
			name:    "feature disabled, only old pod uses the NodeTaintsPolicy field",
			enabled: false,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: &ignore},
				},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
		},
		{
			name:    "feature disabled, only current pod uses the NodeAffinityPolicy field",
			enabled: false,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: &honor},
				},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: nil},
				},
			},
		},
		{
			name:    "feature disabled, only current pod uses the NodeTaintsPolicy field",
			enabled: false,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: &ignore},
				},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: nil},
				},
			},
		},
		{
			name:    "feature disabled, both pods use the NodeAffinityPolicy field",
			enabled: false,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: &honor},
				},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: &ignore},
				},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: &ignore},
				},
			},
		},
		{
			name:    "feature disabled, both pods use the NodeTaintsPolicy field",
			enabled: false,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: &ignore},
				},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: &honor},
				},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: &honor},
				},
			},
		},
		{
			name:    "feature enabled, both pods use the fields",
			enabled: true,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{
						NodeAffinityPolicy: &ignore,
						NodeTaintsPolicy:   &honor,
					},
				},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{
						NodeAffinityPolicy: &honor,
						NodeTaintsPolicy:   &ignore,
					},
				},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{
						NodeAffinityPolicy: &honor,
						NodeTaintsPolicy:   &ignore,
					},
				},
			},
		},
		{
			name:    "feature enabled, only old pod uses the NodeAffinityPolicy field",
			enabled: true,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: &honor},
				},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
		},
		{
			name:    "feature enabled, only old pod uses the NodeTaintsPolicy field",
			enabled: true,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: &ignore},
				},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
		},
		{
			name:    "feature enabled, only current pod uses the NodeAffinityPolicy field",
			enabled: true,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: &ignore},
				},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeAffinityPolicy: &ignore},
				},
			},
		},
		{
			name:    "feature enabled, only current pod uses the NodeTaintsPolicy field",
			enabled: true,
			oldPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{},
			},
			podSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: &honor},
				},
			},
			wantPodSpec: &api.PodSpec{
				TopologySpreadConstraints: []api.TopologySpreadConstraint{
					{NodeTaintsPolicy: &honor},
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeInclusionPolicyInPodTopologySpread, test.enabled)()

			dropDisabledFields(test.podSpec, nil, test.oldPodSpec, nil)
			if diff := cmp.Diff(test.wantPodSpec, test.podSpec); diff != "" {
				t.Errorf("unexpected pod spec (-want, +got):\n%s", diff)
			}
		})
	}
}
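The deferred featuregatetesting.SetFeatureGateDuringTest call above flips the gate for the duration of the subtest and returns a restore function; the trailing () defers that restore. The equivalent long form (a sketch of the same helper, as it existed at the time of this commit):

    restore := featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeInclusionPolicyInPodTopologySpread, test.enabled)
    defer restore()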
@@ -5616,6 +5616,17 @@ const (
	ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway"
)

// NodeInclusionPolicy defines the type of node inclusion policy.
// +enum
type NodeInclusionPolicy string

const (
	// NodeInclusionPolicyIgnore means ignore this scheduling directive when calculating pod topology spread skew.
	NodeInclusionPolicyIgnore NodeInclusionPolicy = "Ignore"
	// NodeInclusionPolicyHonor means use this scheduling directive when calculating pod topology spread skew.
	NodeInclusionPolicyHonor NodeInclusionPolicy = "Honor"
)

// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
type TopologySpreadConstraint struct {
	// MaxSkew describes the degree to which pods may be unevenly distributed.
@@ -5644,7 +5655,8 @@ type TopologySpreadConstraint struct {
	// We consider each <key, value> as a "bucket", and try to put balanced number
	// of pods into each bucket.
	// We define a domain as a particular instance of a topology.
	// Also, we define an eligible domain as a domain whose nodes match the node selector.
	// Also, we define an eligible domain as a domain whose nodes meet the requirements of
	// nodeAffinityPolicy and nodeTaintsPolicy.
	// e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
	// And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
	// It's a required field.
@@ -5702,6 +5714,25 @@ type TopologySpreadConstraint struct {
	// This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate.
	// +optional
	MinDomains *int32
	// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
	// when calculating pod topology spread skew. Options are:
	// - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
	// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
	//
	// If this value is nil, the behavior is equivalent to the Honor policy.
	// This is an alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
	// +optional
	NodeAffinityPolicy *NodeInclusionPolicy
	// NodeTaintsPolicy indicates how we will treat node taints when calculating
	// pod topology spread skew. Options are:
	// - Honor: nodes without taints, along with tainted nodes for which the incoming pod
	// has a toleration, are included.
	// - Ignore: node taints are ignored. All nodes are included.
	//
	// If this value is nil, the behavior is equivalent to the Ignore policy.
	// This is an alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
	// +optional
	NodeTaintsPolicy *NodeInclusionPolicy
}

// These are the built-in errors for PortStatus.
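A sketch of the new fields in use on the internal type (names exactly as defined above; note the asymmetric defaults: nil NodeAffinityPolicy means Honor, nil NodeTaintsPolicy means Ignore):

    honor := core.NodeInclusionPolicyHonor
    ignore := core.NodeInclusionPolicyIgnore
    constraint := core.TopologySpreadConstraint{
        MaxSkew:            1,
        TopologyKey:        "topology.kubernetes.io/zone",
        WhenUnsatisfiable:  core.DoNotSchedule,
        NodeAffinityPolicy: &honor,  // nil would also mean Honor
        NodeTaintsPolicy:   &ignore, // nil would also mean Ignore
    }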
pkg/apis/core/v1/zz_generated.conversion.go (generated, 4 changes)
@@ -7997,6 +7997,8 @@ func autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in
	out.WhenUnsatisfiable = core.UnsatisfiableConstraintAction(in.WhenUnsatisfiable)
	out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
	out.MinDomains = (*int32)(unsafe.Pointer(in.MinDomains))
	out.NodeAffinityPolicy = (*core.NodeInclusionPolicy)(unsafe.Pointer(in.NodeAffinityPolicy))
	out.NodeTaintsPolicy = (*core.NodeInclusionPolicy)(unsafe.Pointer(in.NodeTaintsPolicy))
	return nil
}

@@ -8011,6 +8013,8 @@ func autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in
	out.WhenUnsatisfiable = v1.UnsatisfiableConstraintAction(in.WhenUnsatisfiable)
	out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
	out.MinDomains = (*int32)(unsafe.Pointer(in.MinDomains))
	out.NodeAffinityPolicy = (*v1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeAffinityPolicy))
	out.NodeTaintsPolicy = (*v1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeTaintsPolicy))
	return nil
}
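The unsafe.Pointer casts above are the standard generated-conversion shortcut: v1.NodeInclusionPolicy and core.NodeInclusionPolicy are both defined as string types, so a pointer to one can be reinterpreted as a pointer to the other without copying. A sketch of the equivalence:

    p := v1.NodeInclusionPolicy("Honor")
    out := (*core.NodeInclusionPolicy)(unsafe.Pointer(&p)) // same memory, retyped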
@@ -6490,6 +6490,12 @@ func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstrai
			allErrs = append(allErrs, err)
		}
		allErrs = append(allErrs, validateMinDomains(subFldPath.Child("minDomains"), constraint.MinDomains, constraint.WhenUnsatisfiable)...)
		if err := validateNodeInclusionPolicy(subFldPath.Child("nodeAffinityPolicy"), constraint.NodeAffinityPolicy); err != nil {
			allErrs = append(allErrs, err)
		}
		if err := validateNodeInclusionPolicy(subFldPath.Child("nodeTaintsPolicy"), constraint.NodeTaintsPolicy); err != nil {
			allErrs = append(allErrs, err)
		}
	}

	return allErrs
@@ -6547,6 +6553,22 @@ func ValidateSpreadConstraintNotRepeat(fldPath *field.Path, constraint core.Topo
	return nil
}

var (
	supportedPodTopologySpreadNodePolicies = sets.NewString(string(core.NodeInclusionPolicyIgnore), string(core.NodeInclusionPolicyHonor))
)

// validateNodeInclusionPolicy tests that the argument is a valid NodeInclusionPolicy.
func validateNodeInclusionPolicy(fldPath *field.Path, policy *core.NodeInclusionPolicy) *field.Error {
	if policy == nil {
		return nil
	}

	if !supportedPodTopologySpreadNodePolicies.Has(string(*policy)) {
		return field.NotSupported(fldPath, policy, supportedPodTopologySpreadNodePolicies.List())
	}
	return nil
}

// ValidateServiceClusterIPsRelatedFields validates .spec.ClusterIPs,
// .spec.IPFamilies, .spec.ipFamilyPolicy. This is exported because it is used
// during IP init and allocation.
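A sketch of the validator above rejecting an unknown value (using the field package as imported in this file):

    bad := core.NodeInclusionPolicy("Unknown")
    err := validateNodeInclusionPolicy(field.NewPath("nodeTaintsPolicy"), &bad)
    // err is a field.NotSupported error listing the supported values, [Honor Ignore].

A nil policy is valid and simply falls back to the documented default behavior.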
@@ -18926,6 +18926,12 @@ func TestValidateTopologySpreadConstraints(t *testing.T) {
	fieldPathTopologyKey := subFldPath0.Child("topologyKey")
	fieldPathWhenUnsatisfiable := subFldPath0.Child("whenUnsatisfiable")
	fieldPathTopologyKeyAndWhenUnsatisfiable := subFldPath0.Child("{topologyKey, whenUnsatisfiable}")
	nodeAffinityField := subFldPath0.Child("nodeAffinityPolicy")
	nodeTaintsField := subFldPath0.Child("nodeTaintsPolicy")
	unknown := core.NodeInclusionPolicy("Unknown")
	ignore := core.NodeInclusionPolicyIgnore
	honor := core.NodeInclusionPolicyHonor

	testCases := []struct {
		name        string
		constraints []core.TopologySpreadConstraint
@@ -19055,6 +19061,49 @@ func TestValidateTopologySpreadConstraints(t *testing.T) {
				field.Duplicate(fieldPathTopologyKeyAndWhenUnsatisfiable, fmt.Sprintf("{%v, %v}", "k8s.io/zone", core.DoNotSchedule)),
			},
		},
		{
			name: "supported policy name set on NodeAffinityPolicy and NodeTaintsPolicy",
			constraints: []core.TopologySpreadConstraint{
				{
					MaxSkew:            1,
					TopologyKey:        "k8s.io/zone",
					WhenUnsatisfiable:  core.DoNotSchedule,
					NodeAffinityPolicy: &honor,
					NodeTaintsPolicy:   &ignore,
				},
			},
			wantFieldErrors: []*field.Error{},
		},
		{
			name: "unsupported policy name set on NodeAffinityPolicy",
			constraints: []core.TopologySpreadConstraint{
				{
					MaxSkew:            1,
					TopologyKey:        "k8s.io/zone",
					WhenUnsatisfiable:  core.DoNotSchedule,
					NodeAffinityPolicy: &unknown,
					NodeTaintsPolicy:   &ignore,
				},
			},
			wantFieldErrors: []*field.Error{
				field.NotSupported(nodeAffinityField, &unknown, supportedPodTopologySpreadNodePolicies.List()),
			},
		},
		{
			name: "unsupported policy name set on NodeTaintsPolicy",
			constraints: []core.TopologySpreadConstraint{
				{
					MaxSkew:            1,
					TopologyKey:        "k8s.io/zone",
					WhenUnsatisfiable:  core.DoNotSchedule,
					NodeAffinityPolicy: &honor,
					NodeTaintsPolicy:   &unknown,
				},
			},
			wantFieldErrors: []*field.Error{
				field.NotSupported(nodeTaintsField, &unknown, supportedPodTopologySpreadNodePolicies.List()),
			},
		},
	}

	for _, tc := range testCases {
pkg/apis/core/zz_generated.deepcopy.go (generated, 10 changes)
@@ -5639,6 +5639,16 @@ func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint)
		*out = new(int32)
		**out = **in
	}
	if in.NodeAffinityPolicy != nil {
		in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy
		*out = new(NodeInclusionPolicy)
		**out = **in
	}
	if in.NodeTaintsPolicy != nil {
		in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy
		*out = new(NodeInclusionPolicy)
		**out = **in
	}
	return
}
@@ -803,6 +803,14 @@ const (
	//
	// Enables support for 'HostProcess' containers on Windows nodes.
	WindowsHostProcessContainers featuregate.Feature = "WindowsHostProcessContainers"

	// owner: @kerthcet
	// kep: http://kep.k8s.io/3094
	// alpha: v1.25
	//
	// Allow users to specify whether to take nodeAffinity/nodeTaint into consideration when
	// calculating pod topology spread skew.
	NodeInclusionPolicyInPodTopologySpread featuregate.Feature = "NodeInclusionPolicyInPodTopologySpread"
)

func init() {
@@ -1030,6 +1038,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
	WindowsHostProcessContainers: {Default: true, PreRelease: featuregate.Beta},

	NodeInclusionPolicyInPodTopologySpread: {Default: false, PreRelease: featuregate.Alpha},

	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
	// unintentionally on either side:
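Because the gate defaults to off at alpha, a cluster must opt in (for example via --feature-gates=NodeInclusionPolicyInPodTopologySpread=true on the relevant components) before the new fields survive writes. Consumers guard on the gate the same way the pod-util helper in this commit does; a sketch:

    if utilfeature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread) {
        // honor NodeAffinityPolicy / NodeTaintsPolicy in spread-skew calculations
    }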
pkg/generated/openapi/zz_generated.openapi.go (generated, 16 changes)
@@ -25417,7 +25417,7 @@ func schema_k8sio_api_core_v1_TopologySpreadConstraint(ref common.ReferenceCallb
			},
			"topologyKey": {
				SchemaProps: spec.SchemaProps{
					Description: "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes match the node selector. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
					Description: "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
					Default:     "",
					Type:        []string{"string"},
					Format:      "",
@@ -25444,6 +25444,20 @@ func schema_k8sio_api_core_v1_TopologySpreadConstraint(ref common.ReferenceCallb
					Format: "int32",
				},
			},
			"nodeAffinityPolicy": {
				SchemaProps: spec.SchemaProps{
					Description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is an alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
					Type:        []string{"string"},
					Format:      "",
				},
			},
			"nodeTaintsPolicy": {
				SchemaProps: spec.SchemaProps{
					Description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is an alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
					Type:        []string{"string"},
					Format:      "",
				},
			},
		},
		Required: []string{"maxSkew", "topologyKey", "whenUnsatisfiable"},
	},
@@ -17,13 +17,13 @@ limitations under the License.
package v1beta3

import (
	"k8s.io/kube-scheduler/config/v1beta3"
	"testing"

	"github.com/google/go-cmp/cmp"
	"k8s.io/apiserver/pkg/util/feature"
	"k8s.io/component-base/featuregate"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kube-scheduler/config/v1beta3"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
	"k8s.io/utils/pointer"
)