Merge pull request #111194 from ravisantoshgudimetla/promote-maxSurge-ga
Promote DS max surge to GA
commit 7156c96e5d

api/openapi-spec/swagger.json (generated, 2 changes)
@@ -1178,7 +1178,7 @@
"properties": {
"maxSurge": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString",
- "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is a beta field and is enabled/disabled by the DaemonSetUpdateSurge feature gate."
+ "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption."
},
"maxUnavailable": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString",
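The round-up rule in this description is easy to check with the IntOrString helpers from apimachinery. A minimal sketch; the node count of 10 is an arbitrary assumption for illustration, not taken from the PR:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// maxSurge as a percentage, matching the description's 30% example.
	maxSurge := intstr.FromString("30%")

	// Assume 10 nodes should run the daemon pod (status.desiredNumberScheduled).
	desiredNumberScheduled := 10

	// The third argument requests round-up, matching "Absolute number is
	// calculated from percentage by rounding up".
	surge, err := intstr.GetScaledValueFromIntOrPercent(&maxSurge, desiredNumberScheduled, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(surge) // 3: at most 3 nodes may run an old and a new pod at once
}
```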
@@ -860,7 +860,7 @@
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
}
],
- "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is a beta field and is enabled/disabled by the DaemonSetUpdateSurge feature gate."
+ "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption."
},
"maxUnavailable": {
"allOf": [
@@ -625,7 +625,6 @@ type RollingUpdateDaemonSet struct {
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
// cause evictions during disruption.
- // This is a beta field and is enabled/disabled by the DaemonSetUpdateSurge feature gate.
// +optional
MaxSurge intstr.IntOrString
}

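For orientation, this is roughly how a client opts into surge-based rotation once the field is available unconditionally. The 25% value and `surgeStrategy` helper name are illustrative assumptions, not taken from the PR:

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// surgeStrategy builds a RollingUpdate strategy that surges instead of
// taking nodes below capacity: maxSurge=25%, maxUnavailable=0. The versioned
// API uses pointers, unlike the internal type shown in the hunk above.
func surgeStrategy() appsv1.DaemonSetUpdateStrategy {
	maxSurge := intstr.FromString("25%") // scaled against the node count, rounded up
	maxUnavailable := intstr.FromInt(0)  // allowed because maxSurge is non-zero
	return appsv1.DaemonSetUpdateStrategy{
		Type: appsv1.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDaemonSet{
			MaxSurge:       &maxSurge,
			MaxUnavailable: &maxUnavailable,
		},
	}
}
```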
@@ -28,11 +28,9 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
- utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/apps"
api "k8s.io/kubernetes/pkg/apis/core"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
- "k8s.io/kubernetes/pkg/features"
)

// ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid.
@@ -373,7 +371,7 @@ func ValidateDaemonSetSpec(spec *apps.DaemonSetSpec, fldPath *field.Path, opts a
// ValidateRollingUpdateDaemonSet validates a given RollingUpdateDaemonSet.
func ValidateRollingUpdateDaemonSet(rollingUpdate *apps.RollingUpdateDaemonSet, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
- if utilfeature.DefaultFeatureGate.Enabled(features.DaemonSetUpdateSurge) {
// Validate both fields are positive ints or have a percentage value
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...)
@@ -392,15 +390,6 @@ func ValidateRollingUpdateDaemonSet(rollingUpdate *apps.RollingUpdateDaemonSet,
allErrs = append(allErrs, field.Required(fldPath.Child("maxUnavailable"), "cannot be 0 when maxSurge is 0"))
}
- } else {
- allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
- if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 {
- // MaxUnavailable cannot be 0.
- allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "cannot be 0"))
- }
- // Validate that MaxUnavailable is not more than 100%.
- allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
- }
return allErrs
}

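With the gate branch gone, the rule set is small. A condensed restatement of the core GA invariant, not the upstream function verbatim (upstream additionally validates that each field is a positive int or percent and that maxUnavailable is at most 100%; `checkSurgeConfig` and `zeroValued` are names invented for this sketch):

```go
package example

import (
	"errors"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// zeroValued reports whether an IntOrString is 0 or "0%".
func zeroValued(v intstr.IntOrString) bool {
	if v.Type == intstr.Int {
		return v.IntValue() == 0
	}
	return v.StrVal == "0%"
}

// checkSurgeConfig restates the invariant enforced above: maxSurge and
// maxUnavailable may not both be zero, because a rollout that can neither
// surge a new pod nor take an old pod down can never make progress.
func checkSurgeConfig(maxUnavailable, maxSurge intstr.IntOrString) error {
	if zeroValued(maxUnavailable) && zeroValued(maxSurge) {
		return errors.New("maxUnavailable cannot be 0 when maxSurge is 0")
	}
	return nil
}
```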
@@ -3591,7 +3591,6 @@ func TestValidateReplicaSet(t *testing.T) {
func TestDaemonSetUpdateMaxSurge(t *testing.T) {
testCases := map[string]struct {
ds *apps.RollingUpdateDaemonSet
- enableSurge bool
expectError bool
}{
"invalid: unset": {
@@ -3637,45 +3636,40 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) {
MaxUnavailable: intstr.FromString("1%"),
MaxSurge: intstr.FromString("1%"),
},
expectError: true,
},

"invalid: surge enabled, unavailable zero percent": {
ds: &apps.RollingUpdateDaemonSet{
MaxUnavailable: intstr.FromString("0%"),
},
- enableSurge: true,
expectError: true,
},
"invalid: surge enabled, unavailable zero": {
ds: &apps.RollingUpdateDaemonSet{
MaxUnavailable: intstr.FromInt(0),
},
- enableSurge: true,
expectError: true,
},
"valid: surge enabled, unavailable one": {
ds: &apps.RollingUpdateDaemonSet{
MaxUnavailable: intstr.FromInt(1),
},
- enableSurge: true,
},
"valid: surge enabled, unavailable one percent": {
ds: &apps.RollingUpdateDaemonSet{
MaxUnavailable: intstr.FromString("1%"),
},
- enableSurge: true,
},
"valid: surge enabled, unavailable 100%": {
ds: &apps.RollingUpdateDaemonSet{
MaxUnavailable: intstr.FromString("100%"),
},
- enableSurge: true,
},
"invalid: surge enabled, unavailable greater than 100%": {
ds: &apps.RollingUpdateDaemonSet{
MaxUnavailable: intstr.FromString("101%"),
},
- enableSurge: true,
expectError: true,
},

@@ -3683,39 +3677,33 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) {
ds: &apps.RollingUpdateDaemonSet{
MaxSurge: intstr.FromString("0%"),
},
- enableSurge: true,
expectError: true,
},
"invalid: surge enabled, surge zero": {
ds: &apps.RollingUpdateDaemonSet{
MaxSurge: intstr.FromInt(0),
},
- enableSurge: true,
expectError: true,
},
"valid: surge enabled, surge one": {
ds: &apps.RollingUpdateDaemonSet{
MaxSurge: intstr.FromInt(1),
},
- enableSurge: true,
},
"valid: surge enabled, surge one percent": {
ds: &apps.RollingUpdateDaemonSet{
MaxSurge: intstr.FromString("1%"),
},
- enableSurge: true,
},
"valid: surge enabled, surge 100%": {
ds: &apps.RollingUpdateDaemonSet{
MaxSurge: intstr.FromString("100%"),
},
- enableSurge: true,
},
"invalid: surge enabled, surge greater than 100%": {
ds: &apps.RollingUpdateDaemonSet{
MaxSurge: intstr.FromString("101%"),
},
- enableSurge: true,
expectError: true,
},

@@ -3724,7 +3712,6 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) {
MaxUnavailable: intstr.FromString("1%"),
MaxSurge: intstr.FromString("1%"),
},
- enableSurge: true,
expectError: true,
},

@@ -3733,7 +3720,6 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) {
MaxUnavailable: intstr.FromString("0%"),
MaxSurge: intstr.FromString("0%"),
},
- enableSurge: true,
expectError: true,
},
"invalid: surge enabled, surge and unavailable zero": {
@@ -3741,7 +3727,6 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) {
MaxUnavailable: intstr.FromInt(0),
MaxSurge: intstr.FromInt(0),
},
- enableSurge: true,
expectError: true,
},
"invalid: surge enabled, surge and unavailable mixed zero": {
@@ -3749,13 +3734,11 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) {
MaxUnavailable: intstr.FromInt(0),
MaxSurge: intstr.FromString("0%"),
},
- enableSurge: true,
expectError: true,
},
}
for tcName, tc := range testCases {
t.Run(tcName, func(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, tc.enableSurge)()
errs := ValidateRollingUpdateDaemonSet(tc.ds, field.NewPath("spec", "updateStrategy", "rollingUpdate"))
if tc.expectError && len(errs) == 0 {
t.Errorf("Unexpected success")
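With the gate locked on, the harness no longer toggles anything per case: the table shrinks to inputs and an expectation. A minimal standalone sketch of the resulting shape, reusing the `checkSurgeConfig` stand-in from the earlier sketch rather than the real validator:

```go
package example

import (
	"testing"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// TestSurgeTable shows the post-GA table shape: no enableSurge knob and no
// feature-gate setup, just the fields under test and the expected outcome.
func TestSurgeTable(t *testing.T) {
	testCases := map[string]struct {
		maxUnavailable intstr.IntOrString
		maxSurge       intstr.IntOrString
		expectError    bool
	}{
		"invalid: both zero": {
			maxUnavailable: intstr.FromInt(0),
			maxSurge:       intstr.FromString("0%"),
			expectError:    true,
		},
		"valid: surge one": {
			maxUnavailable: intstr.FromInt(0),
			maxSurge:       intstr.FromInt(1),
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			err := checkSurgeConfig(tc.maxUnavailable, tc.maxSurge)
			if tc.expectError != (err != nil) {
				t.Errorf("expectError=%v, got err=%v", tc.expectError, err)
			}
		})
	}
}
```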
@@ -36,7 +36,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/names"
- utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
@@ -44,14 +43,12 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/workqueue"
- featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon/util"
- "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/securitycontext"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
testingclock "k8s.io/utils/clock/testing"
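Every controller test below loses the same three-line scaffold. For context, this is the pre-GA pattern being deleted: SetFeatureGateDuringTest flips a gate for the duration of one test and returns a restore func. A sketch under the assumption of a still-mutable gate (the `testUnderBothGateSettings` helper is invented for illustration; the upstream tests inlined this loop):

```go
package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// Pre-GA pattern: run the body once with the gate off and once with it on,
// restoring the global default gate afterwards via the returned func.
// Once the gate is GA and locked to its default, the "false" leg becomes
// invalid, which is why the promotion commit deletes this scaffolding.
func testUnderBothGateSettings(t *testing.T, body func(t *testing.T)) {
	for _, enabled := range []bool{false, true} {
		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
			features.DaemonSetUpdateSurge, enabled)()
		body(t)
	}
}
```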
@@ -453,9 +450,6 @@ func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.Dae
}

func TestDeleteFinalStateUnknown(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -471,7 +465,6 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey)
}
}
}
}

func TestExpectationsOnRecreate(t *testing.T) {
@@ -679,9 +672,6 @@ func markPodReady(pod *v1.Pod) {

// DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -697,15 +687,11 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
}
}
}

// DaemonSets without node selectors should launch pods on every node by NodeAffinity.
func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
nodeNum := 5
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -777,15 +763,11 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
t.Fatalf("did not find pods on nodes %+v", nodeMap)
}
}
}
}

// Simulate a cluster with 100 nodes, but simulate a limit (like a quota limit)
// of 10 pods, and verify that the ds doesn't make 100 create calls per sync pass
func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -810,13 +792,9 @@ func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
t.Errorf("Unexpected number of create calls. Expected <= %d, saw %d\n", podControl.FakePodControl.CreateLimit*2, podControl.FakePodControl.CreateCallCount)
}
}
}
}

func TestDaemonSetPodCreateExpectationsError(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
strategies := updateStrategies()
for _, strategy := range strategies {
ds := newDaemonSet("foo")
@@ -844,13 +822,9 @@ func TestDaemonSetPodCreateExpectationsError(t *testing.T) {
t.Errorf("Unsatisfied pod creation expectations. Expected %d", creationExpectations)
}
}
}
}

func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -880,14 +854,10 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want)
}
}
}
}

// DaemonSets should do nothing if there aren't any nodes
func TestNoNodesDoesNothing(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, podControl, _, err := newTestController()
if err != nil {
@@ -902,15 +872,11 @@ func TestNoNodesDoesNothing(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

// DaemonSets without node selectors should launch on a single node in a
// single node cluster.
func TestOneNodeDaemonLaunchesPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -929,14 +895,10 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSets should place onto NotReady nodes
func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -960,7 +922,6 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
@@ -1000,9 +961,6 @@ func allocatableResources(memory, cpu string) v1.ResourceList {

// DaemonSets should not unschedule a daemonset pod from a node with insufficient free resource
func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
podSpec.NodeName = "too-much-mem"
@@ -1038,7 +996,6 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
}
}
}
}

// DaemonSets should only place onto nodes with sufficient free resource and matched node selector
@@ -1076,9 +1033,6 @@ func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(

// DaemonSet should launch a pod on a node with taint NetworkUnavailable condition.
func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("simple")
ds.Spec.UpdateStrategy = *strategy
@@ -1102,14 +1056,10 @@ func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSets should not take any actions when being deleted
func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
ds := newDaemonSet("foo")
@@ -1139,13 +1089,9 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
// Bare client says it IS deleted.
ds := newDaemonSet("foo")
@@ -1175,7 +1121,6 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

// Test that if the node is already scheduled with a pod using a host port
@@ -1183,9 +1128,6 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
//
// Issue: https://github.com/kubernetes/kubernetes/issues/22309
func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
podSpec := v1.PodSpec{
NodeName: "port-conflict",
@@ -1218,14 +1160,10 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

// DaemonSets should place onto nodes that would not cause port conflicts
func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
podSpec1 := v1.PodSpec{
NodeName: "no-port-conflict",
@@ -1267,7 +1205,6 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSetController should not sync DaemonSets with empty pod selectors.
@@ -1283,9 +1220,6 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
// this case even though its empty pod selector matches all pods. The DaemonSetController
// should detect this misconfiguration and choose not to sync the DaemonSet. We should
// not observe a deletion of the pod on node1.
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1321,14 +1255,10 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 1)
}
}
}

// Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
func TestDealsWithExistingPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1347,14 +1277,10 @@ func TestDealsWithExistingPods(t *testing.T) {
addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2)
expectSyncDaemonSets(t, manager, ds, podControl, 2, 5, 0)
}
}
}

// Daemon with node selector should launch pods on nodes matching selector.
func TestSelectorDaemonLaunchesPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
daemon := newDaemonSet("foo")
daemon.Spec.UpdateStrategy = *strategy
@@ -1371,14 +1297,10 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
}
expectSyncDaemonSets(t, manager, daemon, podControl, 3, 0, 0)
}
}
}

// Daemon with node selector should delete pods from nodes that do not satisfy selector.
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1399,14 +1321,10 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 4, 0)
}
}
}

// DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1431,14 +1349,10 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1)
expectSyncDaemonSets(t, manager, ds, podControl, 3, 20, 0)
}
}
}

// DaemonSet with node selector which does not match any node labels should not launch pods.
func TestBadSelectorDaemonDoesNothing(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, podControl, _, err := newTestController()
if err != nil {
@@ -1455,14 +1369,10 @@ func TestBadSelectorDaemonDoesNothing(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

// DaemonSet with node name should launch pod on node with corresponding name.
func TestNameDaemonSetLaunchesPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1478,14 +1388,10 @@ func TestNameDaemonSetLaunchesPods(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSet with node name that does not exist should not launch pods.
func TestBadNameDaemonSetDoesNothing(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1501,14 +1407,10 @@ func TestBadNameDaemonSetDoesNothing(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

// DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1526,14 +1428,10 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1551,7 +1449,6 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

// DaemonSet with node selector, matching some nodes, should launch pods on all the nodes.
@@ -1573,9 +1470,6 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) {

// Daemon with node affinity should launch pods on nodes matching affinity.
func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
daemon := newDaemonSet("foo")
daemon.Spec.UpdateStrategy = *strategy
@@ -1609,13 +1503,9 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
}
expectSyncDaemonSets(t, manager, daemon, podControl, 3, 0, 0)
}
}
}

func TestNumberReadyStatus(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1658,13 +1548,9 @@ func TestNumberReadyStatus(t *testing.T) {
t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
}
}
}
}

func TestObservedGeneration(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1696,7 +1582,6 @@ func TestObservedGeneration(t *testing.T) {
t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration)
}
}
}
}

// DaemonSet controller should kill all failed pods and create at most 1 pod on every node.
@@ -1735,9 +1620,6 @@ func TestDaemonKillFailedPods(t *testing.T) {

// DaemonSet controller needs to backoff when killing failed pods to avoid hot looping and fighting with kubelet.
func TestDaemonKillFailedPodsBackoff(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
t.Run(string(strategy.Type), func(t *testing.T) {
ds := newDaemonSet("foo")
@@ -1803,15 +1685,11 @@ func TestDaemonKillFailedPodsBackoff(t *testing.T) {
expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 1)
})
}
}
}

// Daemonset should not remove a running pod from a node if the pod doesn't
// tolerate the node's NoSchedule taint
func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("intolerant")
ds.Spec.UpdateStrategy = *strategy
@@ -1837,15 +1715,11 @@ func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

// Daemonset should remove a running pod from a node if the pod doesn't
// tolerate the node's NoExecute taint
func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("intolerant")
ds.Spec.UpdateStrategy = *strategy
@@ -1871,14 +1745,10 @@ func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0)
}
}
}

// DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint.
func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("intolerant")
ds.Spec.UpdateStrategy = *strategy
@@ -1900,14 +1770,10 @@ func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

// DaemonSet should launch a pod on a tainted node when the pod can tolerate that taint.
func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("tolerate")
ds.Spec.UpdateStrategy = *strategy
@@ -1930,14 +1796,10 @@ func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("simple")
ds.Spec.UpdateStrategy = *strategy
@@ -1962,14 +1824,10 @@ func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSet should launch a pod on an unreachable node with taint unreachable:NoExecute.
func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("simple")
ds.Spec.UpdateStrategy = *strategy
@@ -1994,14 +1852,10 @@ func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSet should launch a pod on an untainted node when the pod has tolerations.
func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("tolerate")
ds.Spec.UpdateStrategy = *strategy
@@ -2018,14 +1872,10 @@ func TestNodeDaemonLaunchesToleratePod(t *testing.T) {

expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

// DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
func TestDaemonSetRespectsTermination(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -2048,7 +1898,6 @@ func TestDaemonSetRespectsTermination(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
}
}

func setNodeTaint(node *v1.Node, taints []v1.Taint) {
@@ -2061,9 +1910,6 @@ func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {

// DaemonSet should launch a pod even when the node has MemoryPressure/DiskPressure/PIDPressure taints.
func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("critical")
ds.Spec.UpdateStrategy = *strategy
@@ -2094,7 +1940,6 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
}
}

func setDaemonSetCritical(ds *apps.DaemonSet) {
@@ -2378,9 +2223,6 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
}

for i, c := range cases {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
node := newNode("test-node", simpleDaemonSetLabel)
node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...)
@@ -2407,7 +2249,6 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
}
}
}
}
}

// DaemonSets should be resynced when node labels or taints changed
@@ -2489,9 +2330,6 @@ func TestUpdateNode(t *testing.T) {
},
}
for _, c := range cases {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, podControl, _, err := newTestController()
if err != nil {
@@ -2530,7 +2368,6 @@ func TestUpdateNode(t *testing.T) {
}
}
}
}
}

// DaemonSets should be resynced when non-daemon pods were deleted.
@@ -2674,9 +2511,6 @@ func TestDeleteNoDaemonPod(t *testing.T) {
}

for _, c := range cases {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, podControl, _, err := newTestController()
if err != nil {
@@ -2711,13 +2545,9 @@ func TestDeleteNoDaemonPod(t *testing.T) {
}
}
}
}
}

func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -2757,13 +2587,9 @@ func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) {
}
expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0)
}
}
}

func TestGetNodesToDaemonPods(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -2833,7 +2659,6 @@ func TestGetNodesToDaemonPods(t *testing.T) {
t.Errorf("unexpected pod %v was returned", podName)
}
}
}
}

func TestAddNode(t *testing.T) {
@@ -2866,9 +2691,6 @@ func TestAddNode(t *testing.T) {
}

func TestAddPod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -2915,13 +2737,9 @@ func TestAddPod(t *testing.T) {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
}
}

func TestAddPodOrphan(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -2957,13 +2775,9 @@ func TestAddPodOrphan(t *testing.T) {
t.Errorf("getQueuedKeys() = %v, want %v", got, want)
}
}
}
}

func TestUpdatePod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -3014,13 +2828,9 @@ func TestUpdatePod(t *testing.T) {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
}
}

func TestUpdatePodOrphanSameLabels(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -3047,13 +2857,9 @@ func TestUpdatePodOrphanSameLabels(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
}
}

func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -3084,13 +2890,9 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
t.Errorf("getQueuedKeys() = %v, want %v", got, want)
}
}
}
}

func TestUpdatePodChangeControllerRef(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -3118,13 +2920,9 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
}
}

func TestUpdatePodControllerRefRemoved(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -3152,13 +2950,9 @@ func TestUpdatePodControllerRefRemoved(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
}
}

func TestDeletePod(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -3205,13 +2999,9 @@ func TestDeletePod(t *testing.T) {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
}
}

func TestDeletePodOrphan(t *testing.T) {
- dsMaxSurgeFeatureFlags := []bool{false, true}
- for _, isEnabled := range dsMaxSurgeFeatureFlags {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -3243,7 +3033,6 @@ func TestDeletePodOrphan(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
}
}

func bumpResourceVersion(obj metav1.Object) {
@@ -3285,7 +3074,6 @@ func TestSurgeDealsWithExistingPods(t *testing.T) {
}

func TestSurgePreservesReadyOldPods(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
manager, podControl, _, err := newTestController(ds)
@@ -3325,7 +3113,6 @@ func TestSurgePreservesReadyOldPods(t *testing.T) {
}

func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
manager, podControl, _, err := newTestController(ds)
@@ -3372,7 +3159,6 @@ func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) {
}

func TestSurgeDeletesUnreadyOldPods(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
manager, podControl, _, err := newTestController(ds)
@@ -3412,7 +3198,6 @@ func TestSurgeDeletesUnreadyOldPods(t *testing.T) {
}

func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
ds.Spec.MinReadySeconds = 15
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
@@ -3457,7 +3242,6 @@ func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
}

func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
ds.Spec.MinReadySeconds = 15
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))

@@ -27,12 +27,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
- utilfeature "k8s.io/apiserver/pkg/util/feature"
- featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/daemon/util"
- "k8s.io/kubernetes/pkg/features"
testingclock "k8s.io/utils/clock/testing"
)

@@ -78,7 +75,6 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
}

func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
@@ -191,7 +187,6 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
}

func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
@@ -381,7 +376,6 @@ func TestGetUnavailableNumbers(t *testing.T) {
Manager *daemonSetsController
ds *apps.DaemonSet
nodeToPods map[string][]*v1.Pod
- enableSurge bool
maxSurge int
maxUnavailable int
emptyNodes int
@@ -536,7 +530,6 @@ func TestGetUnavailableNumbers(t *testing.T) {
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
- enableSurge: true,
maxSurge: 1,
maxUnavailable: 0,
emptyNodes: 0,
@@ -566,7 +559,6 @@ func TestGetUnavailableNumbers(t *testing.T) {
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
- enableSurge: true,
maxSurge: 2,
maxUnavailable: 0,
emptyNodes: 0,
@@ -605,8 +597,6 @@ func TestGetUnavailableNumbers(t *testing.T) {

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, c.enableSurge)()

c.Manager.dsStore.Add(c.ds)
nodeList, err := c.Manager.nodeLister.List(labels.Everything())
if err != nil {

@ -25,9 +25,7 @@ import (
    extensions "k8s.io/api/extensions/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    intstrutil "k8s.io/apimachinery/pkg/util/intstr"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/features"
)

// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
@ -137,9 +135,7 @@ func SurgeCount(ds *apps.DaemonSet, numberToSchedule int) (int, error) {
    if ds.Spec.UpdateStrategy.Type != apps.RollingUpdateDaemonSetStrategyType {
        return 0, nil
    }
    if !utilfeature.DefaultFeatureGate.Enabled(features.DaemonSetUpdateSurge) {
        return 0, nil
    }

    r := ds.Spec.UpdateStrategy.RollingUpdate
    if r == nil {
        return 0, nil
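With the gate check gone, all that remains of SurgeCount is resolving the MaxSurge int-or-percent against the number of nodes to schedule. This is an illustrative sketch of that step, not the exact remaining body; GetScaledValueFromIntOrPercent is the real apimachinery intstr helper, and the wrapper function name is invented:

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        intstrutil "k8s.io/apimachinery/pkg/util/intstr"
    )

    // surgeCountSketch is an illustrative stand-in for the remaining SurgeCount
    // body: resolve MaxSurge (absolute or percent) against the node count.
    // Percentages round up, so any non-zero percent yields at least 1.
    func surgeCountSketch(r *appsv1.RollingUpdateDaemonSet, numberToSchedule int) (int, error) {
        if r == nil || r.MaxSurge == nil {
            return 0, nil
        }
        return intstrutil.GetScaledValueFromIntOrPercent(r.MaxSurge, numberToSchedule, true)
    }

    func main() {
        surge := intstrutil.FromString("30%")
        n, _ := surgeCountSketch(&appsv1.RollingUpdateDaemonSet{MaxSurge: &surge}, 10)
        fmt.Println(n) // 3
    }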
@ -215,6 +215,7 @@ const (
    // owner: @smarterclayton
    // alpha: v1.21
    // beta: v1.22
    // GA: v1.25
    // DaemonSets allow workloads to maintain availability during update per node
    DaemonSetUpdateSurge featuregate.Feature = "DaemonSetUpdateSurge"

@ -862,7 +863,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

    CronJobTimeZone: {Default: false, PreRelease: featuregate.Alpha},

    DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.22
    DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

    DefaultPodTopologySpread: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
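The LockToDefault: true entry is what makes the GA promotion safe to ship before the gate is deleted entirely: the gate name still exists, but attempts to override it to a non-default value are rejected. A small demo of that behavior using the component-base featuregate API (the locally constructed gate is a stand-in for the shared DefaultFeatureGate, and the printed error text is approximate):

    package main

    import (
        "fmt"

        "k8s.io/component-base/featuregate"
    )

    func main() {
        // Local stand-in gate; production code registers on the shared DefaultFeatureGate.
        gate := featuregate.NewFeatureGate()
        _ = gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
            "DaemonSetUpdateSurge": {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
        })

        // Overriding a locked gate to a non-default value fails.
        err := gate.Set("DaemonSetUpdateSurge=false")
        fmt.Println(err) // e.g. "cannot set feature gate DaemonSetUpdateSurge to false, feature is locked to true"
    }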
4
pkg/generated/openapi/zz_generated.openapi.go
generated
@ -3728,7 +3728,7 @@ func schema_k8sio_api_apps_v1_RollingUpdateDaemonSet(ref common.ReferenceCallbac
    },
    "maxSurge": {
        SchemaProps: spec.SchemaProps{
            Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.",
            Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
            Ref:         ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
        },
    },
@ -6376,7 +6376,7 @@ func schema_k8sio_api_apps_v1beta2_RollingUpdateDaemonSet(ref common.ReferenceCa
    },
    "maxSurge": {
        SchemaProps: spec.SchemaProps{
            Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.",
            Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
            Ref:         ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
        },
    },
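Since the field is now unconditionally honored, a client can opt a DaemonSet into surge-style rollouts directly. A minimal sketch using the apps/v1 client types; the 30% value is illustrative:

    package main

    import (
        appsv1 "k8s.io/api/apps/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        maxSurge := intstr.FromString("30%")
        maxUnavailable := intstr.FromInt(0)

        // Surge-only rollout: new pods come up on up to 30% of nodes before the
        // old pods there are deleted; no node is left without a running pod.
        _ = appsv1.DaemonSetUpdateStrategy{
            Type: appsv1.RollingUpdateDaemonSetStrategyType,
            RollingUpdate: &appsv1.RollingUpdateDaemonSet{
                MaxSurge:       &maxSurge,
                MaxUnavailable: &maxUnavailable,
            },
        }
    }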
@ -24,17 +24,14 @@ import (
    apivalidation "k8s.io/apimachinery/pkg/api/validation"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/validation/field"
    genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
    "k8s.io/apiserver/pkg/registry/rest"
    "k8s.io/apiserver/pkg/storage/names"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/api/pod"
    "k8s.io/kubernetes/pkg/apis/apps"
    "k8s.io/kubernetes/pkg/apis/apps/validation"
    "k8s.io/kubernetes/pkg/features"
    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

@ -82,7 +79,6 @@ func (daemonSetStrategy) PrepareForCreate(ctx context.Context, obj runtime.Objec
        daemonSet.Spec.TemplateGeneration = 1
    }

    dropDaemonSetDisabledFields(daemonSet, nil)
    pod.DropDisabledTemplateFields(&daemonSet.Spec.Template, nil)
}

@ -91,7 +87,6 @@ func (daemonSetStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.
    newDaemonSet := obj.(*apps.DaemonSet)
    oldDaemonSet := old.(*apps.DaemonSet)

    dropDaemonSetDisabledFields(newDaemonSet, oldDaemonSet)
    pod.DropDisabledTemplateFields(&newDaemonSet.Spec.Template, &oldDaemonSet.Spec.Template)

    // update is not allowed to set status
@ -121,35 +116,6 @@ func (daemonSetStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.
    }
}

// dropDaemonSetDisabledFields drops fields that are not used if their associated feature gates
// are not enabled. The typical pattern is:
//     if !utilfeature.DefaultFeatureGate.Enabled(features.MyFeature) && !myFeatureInUse(oldSvc) {
//         newSvc.Spec.MyFeature = nil
//     }
func dropDaemonSetDisabledFields(newDS *apps.DaemonSet, oldDS *apps.DaemonSet) {
    if !utilfeature.DefaultFeatureGate.Enabled(features.DaemonSetUpdateSurge) {
        if r := newDS.Spec.UpdateStrategy.RollingUpdate; r != nil {
            if daemonSetSurgeFieldsInUse(oldDS) {
                // we need to ensure that MaxUnavailable is non-zero to preserve previous behavior
                if r.MaxUnavailable.IntVal == 0 && r.MaxUnavailable.StrVal == "0%" {
                    r.MaxUnavailable = intstr.FromInt(1)
                }
            } else {
                // clear the MaxSurge field and let validation deal with MaxUnavailable
                r.MaxSurge = intstr.IntOrString{}
            }
        }
    }
}

// daemonSetSurgeFieldsInUse returns true if fields related to daemonset update surge are set
func daemonSetSurgeFieldsInUse(ds *apps.DaemonSet) bool {
    if ds == nil {
        return false
    }
    return ds.Spec.UpdateStrategy.RollingUpdate != nil && (ds.Spec.UpdateStrategy.RollingUpdate.MaxSurge.IntVal != 0 || ds.Spec.UpdateStrategy.RollingUpdate.MaxSurge.StrVal != "")
}

// Validate validates a new daemon set.
func (daemonSetStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
    daemonSet := obj.(*apps.DaemonSet)
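The removal above is the normal end-of-life for the drop-disabled-fields pattern: once a gate is GA and locked on, the strategy no longer needs to scrub the field on writes. For reference, a generic sketch of the pattern for some hypothetical gated pointer field; the types and the gate check here are invented stand-ins, not the real API:

    package sketch

    // Hypothetical types standing in for real API objects. The invariant is
    // what matters: only drop the field when the gate is off AND the old
    // object did not already use it, so updates never strip data in use.
    type spec struct{ MyField *string }
    type daemonSet struct{ Spec spec }

    // gateEnabled stands in for utilfeature.DefaultFeatureGate.Enabled(features.MyFeature).
    func gateEnabled() bool { return false }

    func myFieldInUse(ds *daemonSet) bool {
        return ds != nil && ds.Spec.MyField != nil
    }

    func dropDisabledFields(newDS, oldDS *daemonSet) {
        if !gateEnabled() && !myFieldInUse(oldDS) {
            newDS.Spec.MyField = nil
        }
    }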
@ -21,15 +21,10 @@ import (
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/diff"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/validation/field"
    genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/apis/apps"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/features"
)

const (
@ -139,207 +134,3 @@ func newDaemonSetWithSelectorLabels(selectorLabels map[string]string, templateGe
        },
    }
}

func makeDaemonSetWithSurge(unavailable intstr.IntOrString, surge intstr.IntOrString) *apps.DaemonSet {
    return &apps.DaemonSet{
        Spec: apps.DaemonSetSpec{
            UpdateStrategy: apps.DaemonSetUpdateStrategy{
                Type: apps.RollingUpdateDaemonSetStrategyType,
                RollingUpdate: &apps.RollingUpdateDaemonSet{
                    MaxUnavailable: unavailable,
                    MaxSurge:       surge,
                },
            },
        },
    }
}

func TestDropDisabledField(t *testing.T) {
    testCases := []struct {
        name        string
        enableSurge bool
        ds          *apps.DaemonSet
        old         *apps.DaemonSet
        expect      *apps.DaemonSet
    }{
        {
            name:        "not surge, no update",
            enableSurge: false,
            ds:          &apps.DaemonSet{},
            old:         nil,
            expect:      &apps.DaemonSet{},
        },
        {
            name:        "not surge, field not used",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
            old:         nil,
            expect:      makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
        },
        {
            name:        "not surge, field not used in old and new",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
            old:         makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
        },
        {
            name:        "not surge, field used",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)),
            old:         makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)),
        },
        {
            name:        "not surge, field used, percent",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")),
            old:         makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")),
        },
        {
            name:        "not surge, field used and cleared",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
            old:         makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
        },
        {
            name:        "not surge, field used and cleared, percent",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
            old:         makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
        },
        {
            name:        "surge, field not used",
            enableSurge: true,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
            old:         nil,
            expect:      makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
        },
        {
            name:        "surge, field not used in old and new",
            enableSurge: true,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
            old:         makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
        },
        {
            name:        "surge, field used",
            enableSurge: true,
            ds:          makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
            old:         nil,
            expect:      makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
        },
        {
            name:        "surge, field used, percent",
            enableSurge: true,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")),
            old:         makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")),
        },
        {
            name:        "surge, field used in old and new",
            enableSurge: true,
            ds:          makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
            old:         makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
            expect:      makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
        },
        {
            name:        "surge, allows both fields (validation must catch)",
            enableSurge: true,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)),
            old:         makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)),
        },
        {
            name:        "surge, allows change from unavailable to surge",
            enableSurge: true,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
            old:         makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
        },
        {
            name:        "surge, allows change from surge to unvailable",
            enableSurge: true,
            ds:          makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
            old:         makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
            expect:      makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
        },
        {
            name:        "not surge, allows change from unavailable to surge",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
            old:         makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
        },
        {
            name:        "not surge, allows change from surge to unvailable",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)),
            old:         makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}),
            expect:      makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.IntOrString{}),
        },
        {
            name:        "not surge, allows change from unavailable to surge, percent",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromString("2%"), intstr.IntOrString{}),
            old:         makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromString("1%")),
            expect:      makeDaemonSetWithSurge(intstr.FromString("2%"), intstr.IntOrString{}),
        },
        {
            name:        "not surge, allows change from surge to unvailable, percent",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromString("1%")),
            old:         makeDaemonSetWithSurge(intstr.FromString("2%"), intstr.IntOrString{}),
            expect:      makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.IntOrString{}),
        },
        {
            name:        "not surge, resets zero percent, one percent",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.FromString("1%")),
            old:         makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.FromString("1%")),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(1), intstr.FromString("1%")),
        },
        {
            name:        "not surge, resets and clears when zero percent",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.IntOrString{}),
            old:         makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.FromString("1%")),
            expect:      makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}),
        },
        {
            name:        "not surge, sets zero percent, one percent",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.FromString("1%")),
            old:         nil,
            expect:      makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.IntOrString{}),
        },
        {
            name:        "not surge, sets and clears zero percent",
            enableSurge: false,
            ds:          makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.IntOrString{}),
            old:         nil,
            expect:      makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.IntOrString{}),
        },
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, tc.enableSurge)()
            old := tc.old.DeepCopy()

            dropDaemonSetDisabledFields(tc.ds, tc.old)

            // old obj should never be changed
            if !reflect.DeepEqual(tc.old, old) {
                t.Fatalf("old ds changed: %v", diff.ObjectReflectDiff(tc.old, old))
            }

            if !reflect.DeepEqual(tc.ds, tc.expect) {
                t.Fatalf("unexpected ds spec: %v", diff.ObjectReflectDiff(tc.expect, tc.ds))
            }
        })
    }
}
@ -513,7 +513,6 @@ message RollingUpdateDaemonSet {
    // daemonset on any given node can double if the readiness check fails, and
    // so resource intensive daemonsets should take into account that they may
    // cause evictions during disruption.
    // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.
    // +optional
    optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
}
@ -596,7 +596,6 @@ type RollingUpdateDaemonSet struct {
    // daemonset on any given node can double if the readiness check fails, and
    // so resource intensive daemonsets should take into account that they may
    // cause evictions during disruption.
    // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.
    // +optional
    MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
}
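Because MaxSurge is a pointer with omitempty, an omitted field and an explicitly set value round-trip differently over the wire; per the field documentation above, an absent value defaults to 0 server-side. A small sketch showing the two JSON shapes a client would send (marshaling only, no API server involved):

    package main

    import (
        "encoding/json"
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func toJSON(v interface{}) string {
        b, _ := json.Marshal(v)
        return string(b)
    }

    func main() {
        // Field omitted entirely: server-side defaulting applies (maxSurge=0).
        fmt.Println(toJSON(appsv1.RollingUpdateDaemonSet{})) // {}

        // Field set explicitly.
        surge := intstr.FromInt(1)
        fmt.Println(toJSON(appsv1.RollingUpdateDaemonSet{MaxSurge: &surge})) // {"maxSurge":1}
    }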
@ -263,7 +263,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
    "":               "Spec to control the desired behavior of daemon set rolling update.",
    "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
    "maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.",
    "maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
}

func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {

@ -519,7 +519,6 @@ message RollingUpdateDaemonSet {
    // daemonset on any given node can double if the readiness check fails, and
    // so resource intensive daemonsets should take into account that they may
    // cause evictions during disruption.
    // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.
    // +optional
    optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
}

@ -648,7 +648,6 @@ type RollingUpdateDaemonSet struct {
    // daemonset on any given node can double if the readiness check fails, and
    // so resource intensive daemonsets should take into account that they may
    // cause evictions during disruption.
    // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.
    // +optional
    MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
}

@ -263,7 +263,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
    "":               "Spec to control the desired behavior of daemon set rolling update.",
    "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
    "maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.",
    "maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
}

func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {