diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index f62db9cdcbc..b5e0608c64a 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -1178,7 +1178,7 @@ "properties": { "maxSurge": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", - "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate." + "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption." }, "maxUnavailable": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", diff --git a/api/openapi-spec/v3/apis__apps__v1_openapi.json b/api/openapi-spec/v3/apis__apps__v1_openapi.json index 47cfcf57e34..400a9982ca5 100644 --- a/api/openapi-spec/v3/apis__apps__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apps__v1_openapi.json @@ -860,7 +860,7 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } ], - "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). 
This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate." + "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption." }, "maxUnavailable": { "allOf": [ diff --git a/pkg/apis/apps/types.go b/pkg/apis/apps/types.go index d29904272b0..5b99047cd49 100644 --- a/pkg/apis/apps/types.go +++ b/pkg/apis/apps/types.go @@ -625,7 +625,6 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional MaxSurge intstr.IntOrString } diff --git a/pkg/apis/apps/validation/validation.go b/pkg/apis/apps/validation/validation.go index a8e06a2bfa6..89e644e07ce 100644 --- a/pkg/apis/apps/validation/validation.go +++ b/pkg/apis/apps/validation/validation.go @@ -28,11 +28,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/apis/apps" api "k8s.io/kubernetes/pkg/apis/core" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" - "k8s.io/kubernetes/pkg/features" ) // ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid. 
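The maxSurge description above, together with the ValidateRollingUpdateDaemonSet hunk that follows, pins down the now-ungated semantics: maxSurge and maxUnavailable are mutually exclusive, so exactly one of the two must be non-zero. A minimal sketch of a client building such a strategy with the versioned k8s.io/api/apps/v1 types; the surgeStrategy helper name is illustrative and not part of this change:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// surgeStrategy builds a rolling update that surges an updated DaemonSet pod
// onto at most 30% of nodes at a time. maxUnavailable stays at 0, as required
// whenever maxSurge is non-zero.
func surgeStrategy() appsv1.DaemonSetUpdateStrategy {
	maxSurge := intstr.FromString("30%")
	maxUnavailable := intstr.FromInt(0)
	return appsv1.DaemonSetUpdateStrategy{
		Type: appsv1.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDaemonSet{
			MaxSurge:       &maxSurge,
			MaxUnavailable: &maxUnavailable,
		},
	}
}

func main() {
	s := surgeStrategy()
	fmt.Printf("maxSurge=%s maxUnavailable=%s\n",
		s.RollingUpdate.MaxSurge.String(), s.RollingUpdate.MaxUnavailable.String())
}

The hunk below now applies the surge validation unconditionally: both fields must be non-negative integers or percentages, neither may exceed 100%, and exactly one may be non-zero. A standalone sketch of that exactly-one rule, assuming the parsing done by getIntOrPercentValue can be approximated by comparing the rendered IntOrString value (isZero and checkSurgeExclusivity are illustrative names only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// isZero approximates getIntOrPercentValue(v) != 0: it treats 0 and "0%" as
// zero. The real helper parses the percentage; this is a simplification.
func isZero(v intstr.IntOrString) bool {
	s := v.String()
	return s == "0" || s == "0%"
}

// checkSurgeExclusivity mirrors the switch added in the hunk below: exactly
// one of maxSurge or maxUnavailable may be non-zero.
func checkSurgeExclusivity(maxUnavailable, maxSurge intstr.IntOrString) error {
	hasUnavailable := !isZero(maxUnavailable)
	hasSurge := !isZero(maxSurge)
	switch {
	case hasUnavailable && hasSurge:
		return fmt.Errorf("maxSurge may not be set when maxUnavailable is non-zero")
	case !hasUnavailable && !hasSurge:
		return fmt.Errorf("maxUnavailable cannot be 0 when maxSurge is 0")
	}
	return nil
}

func main() {
	fmt.Println(checkSurgeExclusivity(intstr.FromInt(0), intstr.FromString("30%"))) // <nil>: surge-only is valid
	fmt.Println(checkSurgeExclusivity(intstr.FromInt(1), intstr.FromString("1%")))  // error: both set
	fmt.Println(checkSurgeExclusivity(intstr.FromInt(0), intstr.FromInt(0)))        // error: both zero
}
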
@@ -373,34 +371,25 @@ func ValidateDaemonSetSpec(spec *apps.DaemonSetSpec, fldPath *field.Path, opts a // ValidateRollingUpdateDaemonSet validates a given RollingUpdateDaemonSet. func ValidateRollingUpdateDaemonSet(rollingUpdate *apps.RollingUpdateDaemonSet, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - if utilfeature.DefaultFeatureGate.Enabled(features.DaemonSetUpdateSurge) { - // Validate both fields are positive ints or have a percentage value - allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) - allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...) - // Validate that MaxUnavailable and MaxSurge are not more than 100%. - allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) - allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...) + // Validate both fields are positive ints or have a percentage value + allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...) - // Validate exactly one of MaxSurge or MaxUnavailable is non-zero - hasUnavailable := getIntOrPercentValue(rollingUpdate.MaxUnavailable) != 0 - hasSurge := getIntOrPercentValue(rollingUpdate.MaxSurge) != 0 - switch { - case hasUnavailable && hasSurge: - allErrs = append(allErrs, field.Invalid(fldPath.Child("maxSurge"), rollingUpdate.MaxSurge, "may not be set when maxUnavailable is non-zero")) - case !hasUnavailable && !hasSurge: - allErrs = append(allErrs, field.Required(fldPath.Child("maxUnavailable"), "cannot be 0 when maxSurge is 0")) - } + // Validate that MaxUnavailable and MaxSurge are not more than 100%. + allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...) - } else { - allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) - if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 { - // MaxUnavailable cannot be 0. - allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "cannot be 0")) - } - // Validate that MaxUnavailable is not more than 100%. - allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) 
+ // Validate exactly one of MaxSurge or MaxUnavailable is non-zero + hasUnavailable := getIntOrPercentValue(rollingUpdate.MaxUnavailable) != 0 + hasSurge := getIntOrPercentValue(rollingUpdate.MaxSurge) != 0 + switch { + case hasUnavailable && hasSurge: + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxSurge"), rollingUpdate.MaxSurge, "may not be set when maxUnavailable is non-zero")) + case !hasUnavailable && !hasSurge: + allErrs = append(allErrs, field.Required(fldPath.Child("maxUnavailable"), "cannot be 0 when maxSurge is 0")) } + return allErrs } diff --git a/pkg/apis/apps/validation/validation_test.go b/pkg/apis/apps/validation/validation_test.go index d33d86ff12c..6d34ea5a532 100644 --- a/pkg/apis/apps/validation/validation_test.go +++ b/pkg/apis/apps/validation/validation_test.go @@ -3591,7 +3591,6 @@ func TestValidateReplicaSet(t *testing.T) { func TestDaemonSetUpdateMaxSurge(t *testing.T) { testCases := map[string]struct { ds *apps.RollingUpdateDaemonSet - enableSurge bool expectError bool }{ "invalid: unset": { @@ -3637,45 +3636,40 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) { MaxUnavailable: intstr.FromString("1%"), MaxSurge: intstr.FromString("1%"), }, + expectError: true, }, "invalid: surge enabled, unavailable zero percent": { ds: &apps.RollingUpdateDaemonSet{ MaxUnavailable: intstr.FromString("0%"), }, - enableSurge: true, expectError: true, }, "invalid: surge enabled, unavailable zero": { ds: &apps.RollingUpdateDaemonSet{ MaxUnavailable: intstr.FromInt(0), }, - enableSurge: true, expectError: true, }, "valid: surge enabled, unavailable one": { ds: &apps.RollingUpdateDaemonSet{ MaxUnavailable: intstr.FromInt(1), }, - enableSurge: true, }, "valid: surge enabled, unavailable one percent": { ds: &apps.RollingUpdateDaemonSet{ MaxUnavailable: intstr.FromString("1%"), }, - enableSurge: true, }, "valid: surge enabled, unavailable 100%": { ds: &apps.RollingUpdateDaemonSet{ MaxUnavailable: intstr.FromString("100%"), }, - enableSurge: true, }, "invalid: surge enabled, unavailable greater than 100%": { ds: &apps.RollingUpdateDaemonSet{ MaxUnavailable: intstr.FromString("101%"), }, - enableSurge: true, expectError: true, }, @@ -3683,39 +3677,33 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) { ds: &apps.RollingUpdateDaemonSet{ MaxSurge: intstr.FromString("0%"), }, - enableSurge: true, expectError: true, }, "invalid: surge enabled, surge zero": { ds: &apps.RollingUpdateDaemonSet{ MaxSurge: intstr.FromInt(0), }, - enableSurge: true, expectError: true, }, "valid: surge enabled, surge one": { ds: &apps.RollingUpdateDaemonSet{ MaxSurge: intstr.FromInt(1), }, - enableSurge: true, }, "valid: surge enabled, surge one percent": { ds: &apps.RollingUpdateDaemonSet{ MaxSurge: intstr.FromString("1%"), }, - enableSurge: true, }, "valid: surge enabled, surge 100%": { ds: &apps.RollingUpdateDaemonSet{ MaxSurge: intstr.FromString("100%"), }, - enableSurge: true, }, "invalid: surge enabled, surge greater than 100%": { ds: &apps.RollingUpdateDaemonSet{ MaxSurge: intstr.FromString("101%"), }, - enableSurge: true, expectError: true, }, @@ -3724,7 +3712,6 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) { MaxUnavailable: intstr.FromString("1%"), MaxSurge: intstr.FromString("1%"), }, - enableSurge: true, expectError: true, }, @@ -3733,7 +3720,6 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) { MaxUnavailable: intstr.FromString("0%"), MaxSurge: intstr.FromString("0%"), }, - enableSurge: true, expectError: true, }, "invalid: surge enabled, surge and unavailable zero": { @@ -3741,7 +3727,6 
@@ func TestDaemonSetUpdateMaxSurge(t *testing.T) { MaxUnavailable: intstr.FromInt(0), MaxSurge: intstr.FromInt(0), }, - enableSurge: true, expectError: true, }, "invalid: surge enabled, surge and unavailable mixed zero": { @@ -3749,13 +3734,11 @@ func TestDaemonSetUpdateMaxSurge(t *testing.T) { MaxUnavailable: intstr.FromInt(0), MaxSurge: intstr.FromString("0%"), }, - enableSurge: true, expectError: true, }, } for tcName, tc := range testCases { t.Run(tcName, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, tc.enableSurge)() errs := ValidateRollingUpdateDaemonSet(tc.ds, field.NewPath("spec", "updateStrategy", "rollingUpdate")) if tc.expectError && len(errs) == 0 { t.Errorf("Unexpected success") diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 9328a0a05dc..32d9dc05b09 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/storage/names" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" @@ -44,14 +43,12 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" - featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/klog/v2" podutil "k8s.io/kubernetes/pkg/api/v1/pod" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/daemon/util" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/securitycontext" labelsutil "k8s.io/kubernetes/pkg/util/labels" testingclock "k8s.io/utils/clock/testing" @@ -453,23 +450,19 @@ func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.Dae } func TestDeleteFinalStateUnknown(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 1, nil) - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - // DeletedFinalStateUnknown should queue the embedded DS if found. - manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds}) - enqueuedKey, _ := manager.queue.Get() - if enqueuedKey.(string) != "default/foo" { - t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + addNodes(manager.nodeStore, 0, 1, nil) + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + // DeletedFinalStateUnknown should queue the embedded DS if found. 
+ manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds}) + enqueuedKey, _ := manager.queue.Get() + if enqueuedKey.(string) != "default/foo" { + t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey) } } } @@ -679,103 +672,95 @@ func markPodReady(pod *v1.Pod) { // DaemonSets without node selectors should launch pods on every node. func TestSimpleDaemonSetLaunchesPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 5, nil) - err = manager.dsStore.Add(ds) - if err != nil { - t.Error(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 5, nil) + err = manager.dsStore.Add(ds) + if err != nil { + t.Error(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0) } } // DaemonSets without node selectors should launch pods on every node by NodeAffinity. func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) { nodeNum := 5 - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + addNodes(manager.nodeStore, 0, nodeNum, nil) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, nodeNum, 0, 0) + + if len(podControl.podIDMap) != nodeNum { + t.Fatalf("failed to create pods for DaemonSet") + } + + nodeMap := make(map[string]*v1.Node) + for _, node := range manager.nodeStore.List() { + n := node.(*v1.Node) + nodeMap[n.Name] = n + } + if len(nodeMap) != nodeNum { + t.Fatalf("not enough nodes in the store, expected: %v, got: %v", + nodeNum, len(nodeMap)) + } + + for _, pod := range podControl.podIDMap { + if len(pod.Spec.NodeName) != 0 { + t.Fatalf("the hostname of pod %v should be empty, but got %s", + pod.Name, pod.Spec.NodeName) } - addNodes(manager.nodeStore, 0, nodeNum, nil) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) + if pod.Spec.Affinity == nil { + t.Fatalf("the Affinity of pod %s is nil.", pod.Name) + } + if pod.Spec.Affinity.NodeAffinity == nil { + t.Fatalf("the NodeAffinity of pod %s is nil.", pod.Name) + } + nodeSelector := 
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution + if nodeSelector == nil { + t.Fatalf("the node selector of pod %s is nil.", pod.Name) + } + if len(nodeSelector.NodeSelectorTerms) != 1 { + t.Fatalf("incorrect number of node selector terms in pod %s, expected: 1, got: %d.", + pod.Name, len(nodeSelector.NodeSelectorTerms)) } - expectSyncDaemonSets(t, manager, ds, podControl, nodeNum, 0, 0) - - if len(podControl.podIDMap) != nodeNum { - t.Fatalf("failed to create pods for DaemonSet") + if len(nodeSelector.NodeSelectorTerms[0].MatchFields) != 1 { + t.Fatalf("incorrect number of fields in node selector term for pod %s, expected: 1, got: %d.", + pod.Name, len(nodeSelector.NodeSelectorTerms[0].MatchFields)) } - nodeMap := make(map[string]*v1.Node) - for _, node := range manager.nodeStore.List() { - n := node.(*v1.Node) - nodeMap[n.Name] = n - } - if len(nodeMap) != nodeNum { - t.Fatalf("not enough nodes in the store, expected: %v, got: %v", - nodeNum, len(nodeMap)) + field := nodeSelector.NodeSelectorTerms[0].MatchFields[0] + if field.Key == metav1.ObjectNameField { + if field.Operator != v1.NodeSelectorOpIn { + t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn) + } + + if len(field.Values) != 1 { + t.Fatalf("incorrect hostname in node affinity: expected 1, got %v", len(field.Values)) + } + delete(nodeMap, field.Values[0]) } + } - for _, pod := range podControl.podIDMap { - if len(pod.Spec.NodeName) != 0 { - t.Fatalf("the hostname of pod %v should be empty, but got %s", - pod.Name, pod.Spec.NodeName) - } - if pod.Spec.Affinity == nil { - t.Fatalf("the Affinity of pod %s is nil.", pod.Name) - } - if pod.Spec.Affinity.NodeAffinity == nil { - t.Fatalf("the NodeAffinity of pod %s is nil.", pod.Name) - } - nodeSelector := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution - if nodeSelector == nil { - t.Fatalf("the node selector of pod %s is nil.", pod.Name) - } - if len(nodeSelector.NodeSelectorTerms) != 1 { - t.Fatalf("incorrect number of node selector terms in pod %s, expected: 1, got: %d.", - pod.Name, len(nodeSelector.NodeSelectorTerms)) - } - - if len(nodeSelector.NodeSelectorTerms[0].MatchFields) != 1 { - t.Fatalf("incorrect number of fields in node selector term for pod %s, expected: 1, got: %d.", - pod.Name, len(nodeSelector.NodeSelectorTerms[0].MatchFields)) - } - - field := nodeSelector.NodeSelectorTerms[0].MatchFields[0] - if field.Key == metav1.ObjectNameField { - if field.Operator != v1.NodeSelectorOpIn { - t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn) - } - - if len(field.Values) != 1 { - t.Fatalf("incorrect hostname in node affinity: expected 1, got %v", len(field.Values)) - } - delete(nodeMap, field.Values[0]) - } - } - - if len(nodeMap) != 0 { - t.Fatalf("did not find pods on nodes %+v", nodeMap) - } + if len(nodeMap) != 0 { + t.Fatalf("did not find pods on nodes %+v", nodeMap) } } } @@ -783,183 +768,159 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) { // Simulate a cluster with 100 nodes, but simulate a limit (like a quota limit) // of 10 pods, and verify that the ds doesn't make 100 create calls per sync pass func TestSimpleDaemonSetPodCreateErrors(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range 
updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - podControl.FakePodControl.CreateLimit = 10 - addNodes(manager.nodeStore, 0, podControl.FakePodControl.CreateLimit*10, nil) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + podControl.FakePodControl.CreateLimit = 10 + addNodes(manager.nodeStore, 0, podControl.FakePodControl.CreateLimit*10, nil) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } - expectSyncDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0) + expectSyncDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0) - expectedLimit := 0 - for pass := uint8(0); expectedLimit <= podControl.FakePodControl.CreateLimit; pass++ { - expectedLimit += controller.SlowStartInitialBatchSize << pass - } - if podControl.FakePodControl.CreateCallCount > expectedLimit { - t.Errorf("Unexpected number of create calls. Expected <= %d, saw %d\n", podControl.FakePodControl.CreateLimit*2, podControl.FakePodControl.CreateCallCount) - } + expectedLimit := 0 + for pass := uint8(0); expectedLimit <= podControl.FakePodControl.CreateLimit; pass++ { + expectedLimit += controller.SlowStartInitialBatchSize << pass + } + if podControl.FakePodControl.CreateCallCount > expectedLimit { + t.Errorf("Unexpected number of create calls. Expected <= %d, saw %d\n", podControl.FakePodControl.CreateLimit*2, podControl.FakePodControl.CreateCallCount) } } } func TestDaemonSetPodCreateExpectationsError(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - strategies := updateStrategies() - for _, strategy := range strategies { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - podControl.FakePodControl.CreateLimit = 10 - creationExpectations := 100 - addNodes(manager.nodeStore, 0, 100, nil) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } + strategies := updateStrategies() + for _, strategy := range strategies { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + podControl.FakePodControl.CreateLimit = 10 + creationExpectations := 100 + addNodes(manager.nodeStore, 0, 100, nil) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } - expectSyncDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0) + expectSyncDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0) - dsKey, err := controller.KeyFunc(ds) - if err != nil { - t.Fatalf("error get DaemonSets controller key: %v", err) - } + dsKey, err := controller.KeyFunc(ds) + if err != nil { + t.Fatalf("error get DaemonSets controller key: %v", err) + } - if 
!manager.expectations.SatisfiedExpectations(dsKey) { - t.Errorf("Unsatisfied pod creation expectations. Expected %d", creationExpectations) - } + if !manager.expectations.SatisfiedExpectations(dsKey) { + t.Errorf("Unsatisfied pod creation expectations. Expected %d", creationExpectations) } } } func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, clientset, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, clientset, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } - var updated *apps.DaemonSet - clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - if action.GetSubresource() != "status" { - return false, nil, nil - } - if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*apps.DaemonSet) - } + var updated *apps.DaemonSet + clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + if action.GetSubresource() != "status" { return false, nil, nil - }) - - manager.dsStore.Add(ds) - addNodes(manager.nodeStore, 0, 5, nil) - expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0) - - // Make sure the single sync() updated Status already for the change made - // during the manage() phase. - if got, want := updated.Status.CurrentNumberScheduled, int32(5); got != want { - t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want) } + if u, ok := action.(core.UpdateAction); ok { + updated = u.GetObject().(*apps.DaemonSet) + } + return false, nil, nil + }) + + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 5, nil) + expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0) + + // Make sure the single sync() updated Status already for the change made + // during the manage() phase. 
+ if got, want := updated.Status.CurrentNumberScheduled, int32(5); got != want { + t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want) } } } // DaemonSets should do nothing if there aren't any nodes func TestNoNodesDoesNothing(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + manager, podControl, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSets without node selectors should launch on a single node in a // single node cluster. func TestOneNodeDaemonLaunchesPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.nodeStore.Add(newNode("only-node", nil)) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + err = manager.nodeStore.Add(newNode("only-node", nil)) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSets should place onto NotReady nodes func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("not-ready", nil) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionFalse}, - } - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range 
updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + node := newNode("not-ready", nil) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeReady, Status: v1.ConditionFalse}, + } + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1000,43 +961,39 @@ func allocatableResources(memory, cpu string) v1.ResourceList { // DaemonSets should not unschedule a daemonset pod from a node with insufficient free resource func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - podSpec := resourcePodSpec("too-much-mem", "75M", "75m") - podSpec.NodeName = "too-much-mem" - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("too-much-mem", nil) - node.Status.Allocatable = allocatableResources("100M", "200m") - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - }) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - switch strategy.Type { - case apps.OnDeleteDaemonSetStrategyType: - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) - case apps.RollingUpdateDaemonSetStrategyType: - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) - default: - t.Fatalf("unexpected UpdateStrategy %+v", strategy) - } + for _, strategy := range updateStrategies() { + podSpec := resourcePodSpec("too-much-mem", "75M", "75m") + podSpec.NodeName = "too-much-mem" + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec = podSpec + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + node := newNode("too-much-mem", nil) + node.Status.Allocatable = allocatableResources("100M", "200m") + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.podStore.Add(&v1.Pod{ + Spec: podSpec, + }) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + switch strategy.Type { + case apps.OnDeleteDaemonSetStrategyType: + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + case apps.RollingUpdateDaemonSetStrategyType: + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + default: + t.Fatalf("unexpected UpdateStrategy %+v", strategy) } } } @@ -1076,105 +1033,93 @@ func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod( // DaemonSet should launch a pod on a node with taint NetworkUnavailable condition. 
func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("simple") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("network-unavailable", nil) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}, - } - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("simple") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("network-unavailable", nil) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}, + } + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSets not take any actions when being deleted func TestDontDoAnythingIfBeingDeleted(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m") - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - now := metav1.Now() - ds.DeletionTimestamp = &now - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("not-too-much-mem", nil) - node.Status.Allocatable = allocatableResources("200M", "200m") - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - }) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m") + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec = podSpec + now := metav1.Now() + ds.DeletionTimestamp = &now + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + node := newNode("not-too-much-mem", nil) + node.Status.Allocatable = allocatableResources("200M", "200m") + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.podStore.Add(&v1.Pod{ + Spec: podSpec, + }) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) { - 
dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - // Bare client says it IS deleted. - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - now := metav1.Now() - ds.DeletionTimestamp = &now - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 5, nil) - - // Lister (cache) says it's NOT deleted. - ds2 := *ds - ds2.DeletionTimestamp = nil - err = manager.dsStore.Add(&ds2) - if err != nil { - t.Fatal(err) - } - - // The existence of a matching orphan should block all actions in this state. - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - err = manager.podStore.Add(pod) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + // Bare client says it IS deleted. + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + now := metav1.Now() + ds.DeletionTimestamp = &now + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 5, nil) + + // Lister (cache) says it's NOT deleted. + ds2 := *ds + ds2.DeletionTimestamp = nil + err = manager.dsStore.Add(&ds2) + if err != nil { + t.Fatal(err) + } + + // The existence of a matching orphan should block all actions in this state. + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + err = manager.podStore.Add(pod) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -1183,90 +1128,82 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) { // // Issue: https://github.com/kubernetes/kubernetes/issues/22309 func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - podSpec := v1.PodSpec{ - NodeName: "port-conflict", - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 666, - }}, + for _, strategy := range updateStrategies() { + podSpec := v1.PodSpec{ + NodeName: "port-conflict", + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 666, }}, - } - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("port-conflict", nil) - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - pod := newPod(ds.Name+"-", node.Name, simpleDaemonSetLabel, ds) - err = manager.podStore.Add(pod) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + }}, } + manager, podControl, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + node := newNode("port-conflict", nil) + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) 
+ } + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec = podSpec + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + pod := newPod(ds.Name+"-", node.Name, simpleDaemonSetLabel, ds) + err = manager.podStore.Add(pod) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSets should place onto nodes that would not cause port conflicts func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - podSpec1 := v1.PodSpec{ - NodeName: "no-port-conflict", - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 6661, - }}, + for _, strategy := range updateStrategies() { + podSpec1 := v1.PodSpec{ + NodeName: "no-port-conflict", + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 6661, }}, - } - podSpec2 := v1.PodSpec{ - NodeName: "no-port-conflict", - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 6662, - }}, - }}, - } - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec2 - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("no-port-conflict", nil) - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.podStore.Add(&v1.Pod{ - Spec: podSpec1, - }) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + }}, } + podSpec2 := v1.PodSpec{ + NodeName: "no-port-conflict", + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 6662, + }}, + }}, + } + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec = podSpec2 + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + node := newNode("no-port-conflict", nil) + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.podStore.Add(&v1.Pod{ + Spec: podSpec1, + }) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1283,274 +1220,234 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) { // this case even though it's empty pod selector matches all pods. The DaemonSetController // should detect this misconfiguration and choose not to sync the DaemonSet. We should // not observe a deletion of the pod on node1. 
- dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ls := metav1.LabelSelector{} - ds.Spec.Selector = &ls - ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"} + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ls := metav1.LabelSelector{} + ds.Spec.Selector = &ls + ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"} - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.nodeStore.Add(newNode("node1", nil)) - if err != nil { - t.Fatal(err) - } - // Create pod not controlled by a daemonset. - err = manager.podStore.Add(&v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"bang": "boom"}, - Namespace: metav1.NamespaceDefault, - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - }) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 1) + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + err = manager.nodeStore.Add(newNode("node1", nil)) + if err != nil { + t.Fatal(err) + } + // Create pod not controlled by a daemonset. + err = manager.podStore.Add(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"bang": "boom"}, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1.PodSpec{ + NodeName: "node1", + }, + }) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 1) } } // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods. 
func TestDealsWithExistingPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - addNodes(manager.nodeStore, 0, 5, nil) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2) - addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5) - addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2) - expectSyncDaemonSets(t, manager, ds, podControl, 2, 5, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + addNodes(manager.nodeStore, 0, 5, nil) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2) + addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5) + addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2) + expectSyncDaemonSets(t, manager, ds, podControl, 2, 5, 0) } } // Daemon with node selector should launch pods on nodes matching selector. func TestSelectorDaemonLaunchesPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - daemon := newDaemonSet("foo") - daemon.Spec.UpdateStrategy = *strategy - daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel - manager, podControl, _, err := newTestController(daemon) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - err = manager.dsStore.Add(daemon) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, daemon, podControl, 3, 0, 0) + for _, strategy := range updateStrategies() { + daemon := newDaemonSet("foo") + daemon.Spec.UpdateStrategy = *strategy + daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager, podControl, _, err := newTestController(daemon) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + err = manager.dsStore.Add(daemon) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, daemon, podControl, 3, 0, 0) } } // Daemon with node selector should delete pods from nodes that do not satisfy selector. 
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - addNodes(manager.nodeStore, 0, 5, nil) - addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1) - addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1) - expectSyncDaemonSets(t, manager, ds, podControl, 5, 4, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + addNodes(manager.nodeStore, 0, 5, nil) + addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1) + addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1) + expectSyncDaemonSets(t, manager, ds, podControl, 5, 4, 0) } } // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes. 
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - addNodes(manager.nodeStore, 0, 5, nil) - addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 2) - addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 4) - addPods(manager.podStore, "node-6", simpleDaemonSetLabel, ds, 13) - addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4) - addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1) - expectSyncDaemonSets(t, manager, ds, podControl, 3, 20, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + addNodes(manager.nodeStore, 0, 5, nil) + addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 2) + addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 4) + addPods(manager.podStore, "node-6", simpleDaemonSetLabel, ds, 13) + addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4) + addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1) + expectSyncDaemonSets(t, manager, ds, podControl, 3, 20, 0) } } // DaemonSet with node selector which does not match any node labels should not launch pods. 
func TestBadSelectorDaemonDoesNothing(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2 - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + manager, podControl, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2 + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSet with node name should launch pod on node with corresponding name. func TestNameDaemonSetLaunchesPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeName = "node-0" - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 5, nil) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeName = "node-0" + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 5, nil) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet with node name that does not exist should not launch pods. 
func TestBadNameDaemonSetDoesNothing(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeName = "node-10" - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 5, nil) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeName = "node-10" + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 5, nil) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSet with node selector, and node name, matching a node, should launch a pod on the node. func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - ds.Spec.Template.Spec.NodeName = "node-6" - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + ds.Spec.Template.Spec.NodeName = "node-6" + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing. 
func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - ds.Spec.Template.Spec.NodeName = "node-0" - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + ds.Spec.Template.Spec.NodeName = "node-0" + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -1573,128 +1470,116 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) { // Daemon with node affinity should launch pods on nodes matching affinity. func TestNodeAffinityDaemonLaunchesPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - daemon := newDaemonSet("foo") - daemon.Spec.UpdateStrategy = *strategy - daemon.Spec.Template.Spec.Affinity = &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "color", - Operator: v1.NodeSelectorOpIn, - Values: []string{simpleNodeLabel["color"]}, - }, + for _, strategy := range updateStrategies() { + daemon := newDaemonSet("foo") + daemon.Spec.UpdateStrategy = *strategy + daemon.Spec.Template.Spec.Affinity = &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "color", + Operator: v1.NodeSelectorOpIn, + Values: []string{simpleNodeLabel["color"]}, }, }, }, }, }, - } - - manager, podControl, _, err := newTestController(daemon) - if err != nil { - t.Fatalf("error creating DaemonSetsController: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - err = manager.dsStore.Add(daemon) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, daemon, podControl, 3, 0, 0) + }, } + + manager, podControl, _, err := newTestController(daemon) + if err != nil { + t.Fatalf("error creating DaemonSetsController: %v", err) + } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + err = manager.dsStore.Add(daemon) + if err != nil { + t.Fatal(err) + } + 
expectSyncDaemonSets(t, manager, daemon, podControl, 3, 0, 0) } } func TestNumberReadyStatus(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, clientset, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - var updated *apps.DaemonSet - clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - if action.GetSubresource() != "status" { - return false, nil, nil - } - if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*apps.DaemonSet) - } + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, clientset, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + var updated *apps.DaemonSet + clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + if action.GetSubresource() != "status" { return false, nil, nil - }) - addNodes(manager.nodeStore, 0, 2, simpleNodeLabel) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) } + if u, ok := action.(core.UpdateAction); ok { + updated = u.GetObject().(*apps.DaemonSet) + } + return false, nil, nil + }) + addNodes(manager.nodeStore, 0, 2, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) - if updated.Status.NumberReady != 0 { - t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) - } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + if updated.Status.NumberReady != 0 { + t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) + } - selector, _ := metav1.LabelSelectorAsSelector(ds.Spec.Selector) - daemonPods, _ := manager.podLister.Pods(ds.Namespace).List(selector) - for _, pod := range daemonPods { - condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} - pod.Status.Conditions = append(pod.Status.Conditions, condition) - } + selector, _ := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + daemonPods, _ := manager.podLister.Pods(ds.Namespace).List(selector) + for _, pod := range daemonPods { + condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} + pod.Status.Conditions = append(pod.Status.Conditions, condition) + } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) - if updated.Status.NumberReady != 2 { - t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) - } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + if updated.Status.NumberReady != 2 { + t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) } } } func TestObservedGeneration(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer 
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Generation = 1 - manager, podControl, clientset, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - var updated *apps.DaemonSet - clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - if action.GetSubresource() != "status" { - return false, nil, nil - } - if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*apps.DaemonSet) - } + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Generation = 1 + manager, podControl, clientset, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + var updated *apps.DaemonSet + clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + if action.GetSubresource() != "status" { return false, nil, nil - }) - - addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) } - - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) - if updated.Status.ObservedGeneration != ds.Generation { - t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration) + if u, ok := action.(core.UpdateAction); ok { + updated = u.GetObject().(*apps.DaemonSet) } + return false, nil, nil + }) + + addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + if updated.Status.ObservedGeneration != ds.Generation { + t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration) } } } @@ -1735,319 +1620,283 @@ func TestDaemonKillFailedPods(t *testing.T) { // DaemonSet controller needs to backoff when killing failed pods to avoid hot looping and fighting with kubelet. 
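// The assertions below exercise the controller's failedPodsBackoff, a
// flowcontrol.Backoff keyed per DaemonSet and node (see failedPodsBackoffKey).
// A minimal sketch of that pattern, assuming client-go's flowcontrol package and
// a fake clock; the key string here is illustrative only:
//
//   fakeClock := testingclock.NewFakeClock(time.Now())
//   backoff := flowcontrol.NewFakeBackOff(1*time.Second, 15*time.Minute, fakeClock)
//   backoff.Next("ds/node-0", fakeClock.Now())                       // record a failure; the delay grows toward the cap
//   _ = backoff.IsInBackOffSinceUpdate("ds/node-0", fakeClock.Now()) // true until the current delay has elapsed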
func TestDaemonKillFailedPodsBackoff(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - t.Run(string(strategy.Type), func(t *testing.T) { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy + for _, strategy := range updateStrategies() { + t.Run(string(strategy.Type), func(t *testing.T) { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - addNodes(manager.nodeStore, 0, 1, nil) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + addNodes(manager.nodeStore, 0, 1, nil) - nodeName := "node-0" - pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, simpleDaemonSetLabel, ds) + nodeName := "node-0" + pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, simpleDaemonSetLabel, ds) - // Add a failed Pod - pod.Status.Phase = v1.PodFailed - err = manager.podStore.Add(pod) - if err != nil { - t.Fatal(err) - } + // Add a failed Pod + pod.Status.Phase = v1.PodFailed + err = manager.podStore.Add(pod) + if err != nil { + t.Fatal(err) + } - backoffKey := failedPodsBackoffKey(ds, nodeName) + backoffKey := failedPodsBackoffKey(ds, nodeName) - // First sync will delete the pod, initializing backoff - expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 1) - initialDelay := manager.failedPodsBackoff.Get(backoffKey) - if initialDelay <= 0 { - t.Fatal("Initial delay is expected to be set.") - } + // First sync will delete the pod, initializing backoff + expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 1) + initialDelay := manager.failedPodsBackoff.Get(backoffKey) + if initialDelay <= 0 { + t.Fatal("Initial delay is expected to be set.") + } - resetCounters(manager) + resetCounters(manager) - // Immediate (second) sync gets limited by the backoff - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) - delay := manager.failedPodsBackoff.Get(backoffKey) - if delay != initialDelay { - t.Fatal("Backoff delay shouldn't be raised while waiting.") - } + // Immediate (second) sync gets limited by the backoff + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + delay := manager.failedPodsBackoff.Get(backoffKey) + if delay != initialDelay { + t.Fatal("Backoff delay shouldn't be raised while waiting.") + } - resetCounters(manager) + resetCounters(manager) - // Sleep to wait out backoff - fakeClock := manager.failedPodsBackoff.Clock + // Sleep to wait out backoff + fakeClock := manager.failedPodsBackoff.Clock - // Move just before the backoff end time - fakeClock.Sleep(delay - 1*time.Nanosecond) - if !manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) { - t.Errorf("Backoff delay didn't last the whole waitout period.") - } + // Move just before the backoff end time + fakeClock.Sleep(delay - 1*time.Nanosecond) + if !manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) { + t.Errorf("Backoff delay didn't last the whole waitout period.") + } - // Move to the backoff end time - fakeClock.Sleep(1 * time.Nanosecond) - if 
manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) { - t.Fatal("Backoff delay hasn't been reset after the period has passed.") - } + // Move to the backoff end time + fakeClock.Sleep(1 * time.Nanosecond) + if manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) { + t.Fatal("Backoff delay hasn't been reset after the period has passed.") + } - // After backoff time, it will delete the failed pod - expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 1) - }) - } + // After backoff time, it will delete the failed pod + expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 1) + }) } } // Daemonset should not remove a running pod from a node if the pod doesn't // tolerate the nodes NoSchedule taint func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("intolerant") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - setNodeTaint(node, noScheduleTaints) - err = manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds)) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + setNodeTaint(node, noScheduleTaints) + err = manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds)) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // Daemonset should remove a running pod from a node if the pod doesn't // tolerate the nodes NoExecute taint func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("intolerant") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - setNodeTaint(node, noExecuteTaints) - err = manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds)) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0) + for _, strategy := range updateStrategies() { + ds := 
newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + setNodeTaint(node, noExecuteTaints) + err = manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds)) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0) } } // DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint. func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("intolerant") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - setNodeTaint(node, noScheduleTaints) - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + setNodeTaint(node, noScheduleTaints) + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSet should launch a pod on a tainted node when the pod can tolerate that taint. 
func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("tolerate") - ds.Spec.UpdateStrategy = *strategy - setDaemonSetToleration(ds, noScheduleTolerations) - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - setNodeTaint(node, noScheduleTaints) - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("tolerate") + ds.Spec.UpdateStrategy = *strategy + setDaemonSetToleration(ds, noScheduleTolerations) + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + setNodeTaint(node, noScheduleTaints) + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute. func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("simple") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - setNodeTaint(node, nodeNotReady) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionFalse}, - } - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("simple") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + setNodeTaint(node, nodeNotReady) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeReady, Status: v1.ConditionFalse}, + } + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet should launch a pod on an unreachable node with taint unreachable:NoExecute. 
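// The unreachable condition corresponds to the node.kubernetes.io/unreachable
// taint with effect NoExecute; the DaemonSet controller adds a matching
// toleration to daemon pods, which is why a create is still expected here.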
func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("simple") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - setNodeTaint(node, nodeUnreachable) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionUnknown}, - } - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("simple") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + setNodeTaint(node, nodeUnreachable) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeReady, Status: v1.ConditionUnknown}, + } + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet should launch a pod on an untainted node when the pod has tolerations. func TestNodeDaemonLaunchesToleratePod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("tolerate") - ds.Spec.UpdateStrategy = *strategy - setDaemonSetToleration(ds, noScheduleTolerations) - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 1, nil) - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("tolerate") + ds.Spec.UpdateStrategy = *strategy + setDaemonSetToleration(ds, noScheduleTolerations) + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 1, nil) + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute. 
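// More precisely, the test below checks that the controller does not create a
// replacement while the existing daemon pod is still terminating (its
// DeletionTimestamp is set): zero creates and zero deletes are expected.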
func TestDaemonSetRespectsTermination(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) - pod := newPod(fmt.Sprintf("%s-", "node-0"), "node-0", simpleDaemonSetLabel, ds) - dt := metav1.Now() - pod.DeletionTimestamp = &dt - err = manager.podStore.Add(pod) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) + pod := newPod(fmt.Sprintf("%s-", "node-0"), "node-0", simpleDaemonSetLabel, ds) + dt := metav1.Now() + pod.DeletionTimestamp = &dt + err = manager.podStore.Add(pod) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -2061,39 +1910,35 @@ func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) { // DaemonSet should launch a pod even when the node with MemoryPressure/DiskPressure/PIDPressure taints. func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("critical") - ds.Spec.UpdateStrategy = *strategy - setDaemonSetCritical(ds) - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("resources-pressure", nil) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}, - {Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}, - {Type: v1.NodePIDPressure, Status: v1.ConditionTrue}, - } - node.Spec.Taints = []v1.Taint{ - {Key: v1.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule}, - {Key: v1.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule}, - {Key: v1.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule}, - } - err = manager.nodeStore.Add(node) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("critical") + ds.Spec.UpdateStrategy = *strategy + setDaemonSetCritical(ds) + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("resources-pressure", nil) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}, + {Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}, + 
{Type: v1.NodePIDPressure, Status: v1.ConditionTrue}, + } + node.Spec.Taints = []v1.Taint{ + {Key: v1.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule}, + {Key: v1.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule}, + {Key: v1.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule}, + } + err = manager.nodeStore.Add(node) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -2378,33 +2223,29 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { } for i, c := range cases { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - node := newNode("test-node", simpleDaemonSetLabel) - node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...) - node.Status.Allocatable = allocatableResources("100M", "1") - node.Spec.Unschedulable = c.nodeUnschedulable - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.nodeStore.Add(node) - for _, p := range c.podsOnNode { - manager.podStore.Add(p) - p.Spec.NodeName = "test-node" - manager.podNodeIndex.Add(p) - } - c.ds.Spec.UpdateStrategy = *strategy - shouldRun, shouldContinueRunning := NodeShouldRunDaemonPod(node, c.ds) + for _, strategy := range updateStrategies() { + node := newNode("test-node", simpleDaemonSetLabel) + node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...) + node.Status.Allocatable = allocatableResources("100M", "1") + node.Spec.Unschedulable = c.nodeUnschedulable + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + manager.nodeStore.Add(node) + for _, p := range c.podsOnNode { + manager.podStore.Add(p) + p.Spec.NodeName = "test-node" + manager.podNodeIndex.Add(p) + } + c.ds.Spec.UpdateStrategy = *strategy + shouldRun, shouldContinueRunning := NodeShouldRunDaemonPod(node, c.ds) - if shouldRun != c.shouldRun { - t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldRun, shouldRun) - } - if shouldContinueRunning != c.shouldContinueRunning { - t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning) - } + if shouldRun != c.shouldRun { + t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldRun, shouldRun) + } + if shouldContinueRunning != c.shouldContinueRunning { + t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning) } } } @@ -2489,45 +2330,41 @@ func TestUpdateNode(t *testing.T) { }, } for _, c := range cases { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, podControl, _, err := newTestController() - if err != nil { - 
t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.nodeStore.Add(c.oldNode) - if err != nil { - t.Fatal(err) - } - c.ds.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(c.ds) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, podControl, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + err = manager.nodeStore.Add(c.oldNode) + if err != nil { + t.Fatal(err) + } + c.ds.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(c.ds) + if err != nil { + t.Fatal(err) + } - expectedEvents := 0 - if c.expectedEventsFunc != nil { - expectedEvents = c.expectedEventsFunc(strategy.Type) - } - expectedCreates := 0 - if c.expectedCreates != nil { - expectedCreates = c.expectedCreates() - } - expectSyncDaemonSets(t, manager, c.ds, podControl, expectedCreates, 0, expectedEvents) + expectedEvents := 0 + if c.expectedEventsFunc != nil { + expectedEvents = c.expectedEventsFunc(strategy.Type) + } + expectedCreates := 0 + if c.expectedCreates != nil { + expectedCreates = c.expectedCreates() + } + expectSyncDaemonSets(t, manager, c.ds, podControl, expectedCreates, 0, expectedEvents) - manager.enqueueDaemonSet = func(ds *apps.DaemonSet) { - if ds.Name == "ds" { - enqueued = true - } + manager.enqueueDaemonSet = func(ds *apps.DaemonSet) { + if ds.Name == "ds" { + enqueued = true } + } - enqueued = false - manager.updateNode(c.oldNode, c.newNode) - if enqueued != c.shouldEnqueue { - t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued) - } + enqueued = false + manager.updateNode(c.oldNode, c.newNode) + if enqueued != c.shouldEnqueue { + t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued) } } } @@ -2674,164 +2511,152 @@ func TestDeleteNoDaemonPod(t *testing.T) { } for _, c := range cases { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.nodeStore.Add(c.node) + for _, strategy := range updateStrategies() { + manager, podControl, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + err = manager.nodeStore.Add(c.node) + if err != nil { + t.Fatal(err) + } + c.ds.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(c.ds) + if err != nil { + t.Fatal(err) + } + for _, pod := range c.existPods { + err = manager.podStore.Add(pod) if err != nil { t.Fatal(err) } - c.ds.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(c.ds) - if err != nil { - t.Fatal(err) - } - for _, pod := range c.existPods { - err = manager.podStore.Add(pod) - if err != nil { - t.Fatal(err) - } - } - switch strategy.Type { - case apps.OnDeleteDaemonSetStrategyType, apps.RollingUpdateDaemonSetStrategyType: - expectSyncDaemonSets(t, manager, c.ds, podControl, 1, 0, 0) - default: - t.Fatalf("unexpected UpdateStrategy %+v", strategy) - } + } + switch strategy.Type { + case apps.OnDeleteDaemonSetStrategyType, apps.RollingUpdateDaemonSetStrategyType: + expectSyncDaemonSets(t, manager, c.ds, podControl, 1, 0, 0) + default: + t.Fatalf("unexpected UpdateStrategy %+v", strategy) + } - enqueued = 
false - manager.deletePod(c.deletedPod) - if enqueued != c.shouldEnqueue { - t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued) - } + enqueued = false + manager.deletePod(c.deletedPod) + if enqueued != c.shouldEnqueue { + t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued) } } } } func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.dsStore.Add(ds) - if err != nil { - t.Fatal(err) - } - addNodes(manager.nodeStore, 0, 1, nil) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + addNodes(manager.nodeStore, 0, 1, nil) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) - podScheduledUsingAffinity := newPod("pod1-node-3", "", simpleDaemonSetLabel, ds) - podScheduledUsingAffinity.Spec.Affinity = &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchFields: []v1.NodeSelectorRequirement{ - { - Key: metav1.ObjectNameField, - Operator: v1.NodeSelectorOpIn, - Values: []string{"node-2"}, - }, + podScheduledUsingAffinity := newPod("pod1-node-3", "", simpleDaemonSetLabel, ds) + podScheduledUsingAffinity.Spec.Affinity = &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchFields: []v1.NodeSelectorRequirement{ + { + Key: metav1.ObjectNameField, + Operator: v1.NodeSelectorOpIn, + Values: []string{"node-2"}, }, }, }, }, }, - } - err = manager.podStore.Add(podScheduledUsingAffinity) - if err != nil { - t.Fatal(err) - } - expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0) + }, } + err = manager.podStore.Add(podScheduledUsingAffinity) + if err != nil { + t.Fatal(err) + } + expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0) } } func TestGetNodesToDaemonPods(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - manager, _, _, err := newTestController(ds, ds2) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - err = manager.dsStore.Add(ds) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + 
ds.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + manager, _, _, err := newTestController(ds, ds2) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } + addNodes(manager.nodeStore, 0, 2, nil) + + // These pods should be returned. + wantedPods := []*v1.Pod{ + newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds), + newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil), + newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds), + newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil), + } + failedPod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds) + failedPod.Status = v1.PodStatus{Phase: v1.PodFailed} + wantedPods = append(wantedPods, failedPod) + for _, pod := range wantedPods { + manager.podStore.Add(pod) + } + + // These pods should be ignored. + ignoredPods := []*v1.Pod{ + newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds), + newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil), + newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2), + } + for _, pod := range ignoredPods { + err = manager.podStore.Add(pod) if err != nil { t.Fatal(err) } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } - addNodes(manager.nodeStore, 0, 2, nil) + } - // These pods should be returned. - wantedPods := []*v1.Pod{ - newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds), - newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil), - newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds), - newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil), - } - failedPod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds) - failedPod.Status = v1.PodStatus{Phase: v1.PodFailed} - wantedPods = append(wantedPods, failedPod) - for _, pod := range wantedPods { - manager.podStore.Add(pod) - } - - // These pods should be ignored. 
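// They are ignored either because their labels do not match ds's selector
// (simpleDaemonSetLabel2) or because they are controlled by a different
// DaemonSet (ds2).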
- ignoredPods := []*v1.Pod{ - newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds), - newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil), - newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2), - } - for _, pod := range ignoredPods { - err = manager.podStore.Add(pod) - if err != nil { - t.Fatal(err) + nodesToDaemonPods, err := manager.getNodesToDaemonPods(context.TODO(), ds) + if err != nil { + t.Fatalf("getNodesToDaemonPods() error: %v", err) + } + gotPods := map[string]bool{} + for node, pods := range nodesToDaemonPods { + for _, pod := range pods { + if pod.Spec.NodeName != node { + t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName) } + gotPods[pod.Name] = true } - - nodesToDaemonPods, err := manager.getNodesToDaemonPods(context.TODO(), ds) - if err != nil { - t.Fatalf("getNodesToDaemonPods() error: %v", err) - } - gotPods := map[string]bool{} - for node, pods := range nodesToDaemonPods { - for _, pod := range pods { - if pod.Spec.NodeName != node { - t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName) - } - gotPods[pod.Name] = true - } - } - for _, pod := range wantedPods { - if !gotPods[pod.Name] { - t.Errorf("expected pod %v but didn't get it", pod.Name) - } - delete(gotPods, pod.Name) - } - for podName := range gotPods { - t.Errorf("unexpected pod %v was returned", podName) + } + for _, pod := range wantedPods { + if !gotPods[pod.Name] { + t.Errorf("expected pod %v but didn't get it", pod.Name) } + delete(gotPods, pod.Name) + } + for podName := range gotPods { + t.Errorf("unexpected pod %v was returned", podName) } } } @@ -2866,382 +2691,346 @@ func TestAddNode(t *testing.T) { } func TestAddPod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } - pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - manager.addPod(pod1) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done := manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) - } - expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + manager.addPod(pod1) + if got, want := manager.queue.Len(), 1; got != want { + 
t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done := manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) + } + expectedKey, _ := controller.KeyFunc(ds1) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) + } - pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) - manager.addPod(pod2) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done = manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) - } - expectedKey, _ = controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) + manager.addPod(pod2) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done = manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) + } + expectedKey, _ = controller.KeyFunc(ds2) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) } } } func TestAddPodOrphan(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - ds3 := newDaemonSet("foo3") - ds3.Spec.UpdateStrategy = *strategy - ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2 - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds3) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + ds3 := newDaemonSet("foo3") + ds3.Spec.UpdateStrategy = *strategy + ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2 + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds3) + if err != nil { + t.Fatal(err) + } - // Make pod an orphan. Expect matching sets to be queued. - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - manager.addPod(pod) - if got, want := manager.queue.Len(), 2; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { - t.Errorf("getQueuedKeys() = %v, want %v", got, want) - } + // Make pod an orphan. Expect matching sets to be queued. 
+ pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + manager.addPod(pod) + if got, want := manager.queue.Len(), 2; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { + t.Errorf("getQueuedKeys() = %v, want %v", got, want) } } } func TestUpdatePod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } - pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - prev := *pod1 - bumpResourceVersion(pod1) - manager.updatePod(&prev, pod1) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done := manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) - } - expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + prev := *pod1 + bumpResourceVersion(pod1) + manager.updatePod(&prev, pod1) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done := manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) + } + expectedKey, _ := controller.KeyFunc(ds1) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) + } - pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) - prev = *pod2 - bumpResourceVersion(pod2) - manager.updatePod(&prev, pod2) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done = manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) - } - expectedKey, _ = controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) + prev = *pod2 + bumpResourceVersion(pod2) + manager.updatePod(&prev, pod2) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done = manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) + } + expectedKey, _ 
= controller.KeyFunc(ds2) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) } } } func TestUpdatePodOrphanSameLabels(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - prev := *pod - bumpResourceVersion(pod) - manager.updatePod(&prev, pod) - if got, want := manager.queue.Len(), 0; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + prev := *pod + bumpResourceVersion(pod) + manager.updatePod(&prev, pod) + if got, want := manager.queue.Len(), 0; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) } } } func TestUpdatePodOrphanWithNewLabels(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - prev := *pod - prev.Labels = map[string]string{"foo2": "bar2"} - bumpResourceVersion(pod) - manager.updatePod(&prev, pod) - if got, want := manager.queue.Len(), 2; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { - t.Errorf("getQueuedKeys() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + prev 
:= *pod + prev.Labels = map[string]string{"foo2": "bar2"} + bumpResourceVersion(pod) + manager.updatePod(&prev, pod) + if got, want := manager.queue.Len(), 2; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { + t.Errorf("getQueuedKeys() = %v, want %v", got, want) } } } func TestUpdatePodChangeControllerRef(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds2 := newDaemonSet("foo2") - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds2 := newDaemonSet("foo2") + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - prev := *pod - prev.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ds2, controllerKind)} - bumpResourceVersion(pod) - manager.updatePod(&prev, pod) - if got, want := manager.queue.Len(), 2; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + prev := *pod + prev.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ds2, controllerKind)} + bumpResourceVersion(pod) + manager.updatePod(&prev, pod) + if got, want := manager.queue.Len(), 2; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) } } } func TestUpdatePodControllerRefRemoved(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - prev := *pod - pod.OwnerReferences = nil 
- bumpResourceVersion(pod) - manager.updatePod(&prev, pod) - if got, want := manager.queue.Len(), 2; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + prev := *pod + pod.OwnerReferences = nil + bumpResourceVersion(pod) + manager.updatePod(&prev, pod) + if got, want := manager.queue.Len(), 2; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) } } } func TestDeletePod(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } - pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - manager.deletePod(pod1) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done := manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) - } - expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + manager.deletePod(pod1) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done := manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) + } + expectedKey, _ := controller.KeyFunc(ds1) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) + } - pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) - manager.deletePod(pod2) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done = manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) - } - expectedKey, _ = controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) + manager.deletePod(pod2) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done = manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) + } + expectedKey, _ = controller.KeyFunc(ds2) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, 
want) } } } func TestDeletePodOrphan(t *testing.T) { - dsMaxSurgeFeatureFlags := []bool{false, true} - for _, isEnabled := range dsMaxSurgeFeatureFlags { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, isEnabled)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - ds3 := newDaemonSet("foo3") - ds3.Spec.UpdateStrategy = *strategy - ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2 - err = manager.dsStore.Add(ds1) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds2) - if err != nil { - t.Fatal(err) - } - err = manager.dsStore.Add(ds3) - if err != nil { - t.Fatal(err) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + ds3 := newDaemonSet("foo3") + ds3.Spec.UpdateStrategy = *strategy + ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2 + err = manager.dsStore.Add(ds1) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds2) + if err != nil { + t.Fatal(err) + } + err = manager.dsStore.Add(ds3) + if err != nil { + t.Fatal(err) + } - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - manager.deletePod(pod) - if got, want := manager.queue.Len(), 0; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + manager.deletePod(pod) + if got, want := manager.queue.Len(), 0; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) } } } @@ -3285,7 +3074,6 @@ func TestSurgeDealsWithExistingPods(t *testing.T) { } func TestSurgePreservesReadyOldPods(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)() ds := newDaemonSet("foo") ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1)) manager, podControl, _, err := newTestController(ds) @@ -3325,7 +3113,6 @@ func TestSurgePreservesReadyOldPods(t *testing.T) { } func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)() ds := newDaemonSet("foo") ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1)) manager, podControl, _, err := newTestController(ds) @@ -3372,7 +3159,6 @@ func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) { } func TestSurgeDeletesUnreadyOldPods(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)() ds := newDaemonSet("foo") ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1)) manager, podControl, _, err := newTestController(ds) @@ -3412,7 +3198,6 @@ func TestSurgeDeletesUnreadyOldPods(t *testing.T) { } func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)() ds := newDaemonSet("foo") ds.Spec.MinReadySeconds = 15 ds.Spec.UpdateStrategy = 
newUpdateSurge(intstr.FromInt(1)) @@ -3457,7 +3242,6 @@ func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) { } func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)() ds := newDaemonSet("foo") ds.Spec.MinReadySeconds = 15 ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1)) diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go index 278a08a8db5..157101de857 100644 --- a/pkg/controller/daemon/update_test.go +++ b/pkg/controller/daemon/update_test.go @@ -27,12 +27,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - utilfeature "k8s.io/apiserver/pkg/util/feature" - featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/klog/v2" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller/daemon/util" - "k8s.io/kubernetes/pkg/features" testingclock "k8s.io/utils/clock/testing" ) @@ -78,7 +75,6 @@ func TestDaemonSetUpdatesPods(t *testing.T) { } func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)() ds := newDaemonSet("foo") manager, podControl, _, err := newTestController(ds) if err != nil { @@ -191,7 +187,6 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) { } func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)() ds := newDaemonSet("foo") manager, podControl, _, err := newTestController(ds) if err != nil { @@ -381,7 +376,6 @@ func TestGetUnavailableNumbers(t *testing.T) { Manager *daemonSetsController ds *apps.DaemonSet nodeToPods map[string][]*v1.Pod - enableSurge bool maxSurge int maxUnavailable int emptyNodes int @@ -536,7 +530,6 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-1"] = []*v1.Pod{pod1} return mapping }(), - enableSurge: true, maxSurge: 1, maxUnavailable: 0, emptyNodes: 0, @@ -566,7 +559,6 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-1"] = []*v1.Pod{pod1} return mapping }(), - enableSurge: true, maxSurge: 2, maxUnavailable: 0, emptyNodes: 0, @@ -605,8 +597,6 @@ func TestGetUnavailableNumbers(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, c.enableSurge)() - c.Manager.dsStore.Add(c.ds) nodeList, err := c.Manager.nodeLister.List(labels.Everything()) if err != nil { diff --git a/pkg/controller/daemon/util/daemonset_util.go b/pkg/controller/daemon/util/daemonset_util.go index 5058f747df3..ad91d092393 100644 --- a/pkg/controller/daemon/util/daemonset_util.go +++ b/pkg/controller/daemon/util/daemonset_util.go @@ -25,9 +25,7 @@ import ( extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" intstrutil "k8s.io/apimachinery/pkg/util/intstr" - utilfeature "k8s.io/apiserver/pkg/util/feature" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/features" ) // GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the @@ -137,9 +135,7 @@ func SurgeCount(ds *apps.DaemonSet, numberToSchedule int) (int, error) { if 
ds.Spec.UpdateStrategy.Type != apps.RollingUpdateDaemonSetStrategyType {
 		return 0, nil
 	}
-	if !utilfeature.DefaultFeatureGate.Enabled(features.DaemonSetUpdateSurge) {
-		return 0, nil
-	}
+
 	r := ds.Spec.UpdateStrategy.RollingUpdate
 	if r == nil {
 		return 0, nil
diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go
index e0b828a9f41..c2a4709c3aa 100644
--- a/pkg/features/kube_features.go
+++ b/pkg/features/kube_features.go
@@ -215,6 +215,7 @@ const (
 	// owner: @smarterclayton
 	// alpha: v1.21
 	// beta: v1.22
+	// GA: v1.25
 	// DaemonSets allow workloads to maintain availability during update per node
 	DaemonSetUpdateSurge featuregate.Feature = "DaemonSetUpdateSurge"
@@ -862,7 +863,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	CronJobTimeZone: {Default: false, PreRelease: featuregate.Alpha},
-	DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.22
+	DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
 	DefaultPodTopologySpread: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go
index 4e650bd530e..ac6f925eb7e 100644
--- a/pkg/generated/openapi/zz_generated.openapi.go
+++ b/pkg/generated/openapi/zz_generated.openapi.go
@@ -3728,7 +3728,7 @@ func schema_k8sio_api_apps_v1_RollingUpdateDaemonSet(ref common.ReferenceCallbac
 					},
 					"maxSurge": {
 						SchemaProps: spec.SchemaProps{
-							Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.",
+							Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
 							Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
 						},
 					},
@@ -6376,7 +6376,7 @@ func schema_k8sio_api_apps_v1beta2_RollingUpdateDaemonSet(ref common.ReferenceCa
 					},
 					"maxSurge": {
 						SchemaProps: spec.SchemaProps{
-							Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.",
+							Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
 							Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
 						},
 					},
diff --git a/pkg/registry/apps/daemonset/strategy.go b/pkg/registry/apps/daemonset/strategy.go
index 6cfd65ed7d5..8eca081dd76 100644
--- a/pkg/registry/apps/daemonset/strategy.go
+++ b/pkg/registry/apps/daemonset/strategy.go
@@ -24,17 +24,14 @@ import (
 	apivalidation "k8s.io/apimachinery/pkg/api/validation"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/registry/rest"
 	"k8s.io/apiserver/pkg/storage/names"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/api/pod"
 	"k8s.io/kubernetes/pkg/apis/apps"
 	"k8s.io/kubernetes/pkg/apis/apps/validation"
-	"k8s.io/kubernetes/pkg/features"
 	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 )
@@ -82,7 +79,6 @@ func (daemonSetStrategy) PrepareForCreate(ctx context.Context, obj runtime.Objec
 		daemonSet.Spec.TemplateGeneration = 1
 	}
-	dropDaemonSetDisabledFields(daemonSet, nil)
 	pod.DropDisabledTemplateFields(&daemonSet.Spec.Template, nil)
 }
@@ -91,7 +87,6 @@ func (daemonSetStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.
 	newDaemonSet := obj.(*apps.DaemonSet)
 	oldDaemonSet := old.(*apps.DaemonSet)
-	dropDaemonSetDisabledFields(newDaemonSet, oldDaemonSet)
 	pod.DropDisabledTemplateFields(&newDaemonSet.Spec.Template, &oldDaemonSet.Spec.Template)
 	// update is not allowed to set status
@@ -121,35 +116,6 @@ func (daemonSetStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.
 	}
 }
-// dropDaemonSetDisabledFields drops fields that are not used if their associated feature gates
-// are not enabled. The typical pattern is:
-// if !utilfeature.DefaultFeatureGate.Enabled(features.MyFeature) && !myFeatureInUse(oldSvc) {
-// newSvc.Spec.MyFeature = nil
-// }
-func dropDaemonSetDisabledFields(newDS *apps.DaemonSet, oldDS *apps.DaemonSet) {
-	if !utilfeature.DefaultFeatureGate.Enabled(features.DaemonSetUpdateSurge) {
-		if r := newDS.Spec.UpdateStrategy.RollingUpdate; r != nil {
-			if daemonSetSurgeFieldsInUse(oldDS) {
-				// we need to ensure that MaxUnavailable is non-zero to preserve previous behavior
-				if r.MaxUnavailable.IntVal == 0 && r.MaxUnavailable.StrVal == "0%" {
-					r.MaxUnavailable = intstr.FromInt(1)
-				}
-			} else {
-				// clear the MaxSurge field and let validation deal with MaxUnavailable
-				r.MaxSurge = intstr.IntOrString{}
-			}
-		}
-	}
-}
-
-// daemonSetSurgeFieldsInUse returns true if fields related to daemonset update surge are set
-func daemonSetSurgeFieldsInUse(ds *apps.DaemonSet) bool {
-	if ds == nil {
-		return false
-	}
-	return ds.Spec.UpdateStrategy.RollingUpdate != nil && (ds.Spec.UpdateStrategy.RollingUpdate.MaxSurge.IntVal != 0 || ds.Spec.UpdateStrategy.RollingUpdate.MaxSurge.StrVal != "")
-}
-
 // Validate validates a new daemon set.
func (daemonSetStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { daemonSet := obj.(*apps.DaemonSet) diff --git a/pkg/registry/apps/daemonset/strategy_test.go b/pkg/registry/apps/daemonset/strategy_test.go index 82839fbbfa9..cc8929eaec7 100644 --- a/pkg/registry/apps/daemonset/strategy_test.go +++ b/pkg/registry/apps/daemonset/strategy_test.go @@ -21,15 +21,10 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - utilfeature "k8s.io/apiserver/pkg/util/feature" - featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/pkg/apis/apps" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/features" ) const ( @@ -139,207 +134,3 @@ func newDaemonSetWithSelectorLabels(selectorLabels map[string]string, templateGe }, } } - -func makeDaemonSetWithSurge(unavailable intstr.IntOrString, surge intstr.IntOrString) *apps.DaemonSet { - return &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - UpdateStrategy: apps.DaemonSetUpdateStrategy{ - Type: apps.RollingUpdateDaemonSetStrategyType, - RollingUpdate: &apps.RollingUpdateDaemonSet{ - MaxUnavailable: unavailable, - MaxSurge: surge, - }, - }, - }, - } -} - -func TestDropDisabledField(t *testing.T) { - testCases := []struct { - name string - enableSurge bool - ds *apps.DaemonSet - old *apps.DaemonSet - expect *apps.DaemonSet - }{ - { - name: "not surge, no update", - enableSurge: false, - ds: &apps.DaemonSet{}, - old: nil, - expect: &apps.DaemonSet{}, - }, - { - name: "not surge, field not used", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - old: nil, - expect: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - }, - { - name: "not surge, field not used in old and new", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - old: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - expect: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - }, - { - name: "not surge, field used", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)), - old: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)), - expect: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)), - }, - { - name: "not surge, field used, percent", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")), - old: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")), - expect: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")), - }, - { - name: "not surge, field used and cleared", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - old: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)), - expect: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - }, - { - name: "not surge, field used and cleared, percent", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - old: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")), - expect: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - }, - { - name: "surge, field not used", - enableSurge: true, - ds: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - old: nil, - expect: 
makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - }, - { - name: "surge, field not used in old and new", - enableSurge: true, - ds: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - old: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - expect: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - }, - { - name: "surge, field used", - enableSurge: true, - ds: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - old: nil, - expect: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - }, - { - name: "surge, field used, percent", - enableSurge: true, - ds: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")), - old: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")), - expect: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromString("1%")), - }, - { - name: "surge, field used in old and new", - enableSurge: true, - ds: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - old: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - expect: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - }, - { - name: "surge, allows both fields (validation must catch)", - enableSurge: true, - ds: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)), - old: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)), - expect: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.FromInt(1)), - }, - { - name: "surge, allows change from unavailable to surge", - enableSurge: true, - ds: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - old: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - expect: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - }, - { - name: "surge, allows change from surge to unvailable", - enableSurge: true, - ds: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - old: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - expect: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - }, - { - name: "not surge, allows change from unavailable to surge", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - old: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - expect: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - }, - { - name: "not surge, allows change from surge to unvailable", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromInt(1)), - old: makeDaemonSetWithSurge(intstr.FromInt(2), intstr.IntOrString{}), - expect: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.IntOrString{}), - }, - { - name: "not surge, allows change from unavailable to surge, percent", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromString("2%"), intstr.IntOrString{}), - old: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromString("1%")), - expect: makeDaemonSetWithSurge(intstr.FromString("2%"), intstr.IntOrString{}), - }, - { - name: "not surge, allows change from surge to unvailable, percent", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.FromString("1%")), - old: makeDaemonSetWithSurge(intstr.FromString("2%"), intstr.IntOrString{}), - expect: makeDaemonSetWithSurge(intstr.IntOrString{}, intstr.IntOrString{}), - }, - { - name: "not surge, resets zero percent, one percent", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.FromString("1%")), - old: 
makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.FromString("1%")), - expect: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.FromString("1%")), - }, - { - name: "not surge, resets and clears when zero percent", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.IntOrString{}), - old: makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.FromString("1%")), - expect: makeDaemonSetWithSurge(intstr.FromInt(1), intstr.IntOrString{}), - }, - { - name: "not surge, sets zero percent, one percent", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.FromString("1%")), - old: nil, - expect: makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.IntOrString{}), - }, - { - name: "not surge, sets and clears zero percent", - enableSurge: false, - ds: makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.IntOrString{}), - old: nil, - expect: makeDaemonSetWithSurge(intstr.FromString("0%"), intstr.IntOrString{}), - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, tc.enableSurge)() - old := tc.old.DeepCopy() - - dropDaemonSetDisabledFields(tc.ds, tc.old) - - // old obj should never be changed - if !reflect.DeepEqual(tc.old, old) { - t.Fatalf("old ds changed: %v", diff.ObjectReflectDiff(tc.old, old)) - } - - if !reflect.DeepEqual(tc.ds, tc.expect) { - t.Fatalf("unexpected ds spec: %v", diff.ObjectReflectDiff(tc.expect, tc.ds)) - } - }) - } - -} diff --git a/staging/src/k8s.io/api/apps/v1/generated.proto b/staging/src/k8s.io/api/apps/v1/generated.proto index c5180c7fcfb..f728176f83e 100644 --- a/staging/src/k8s.io/api/apps/v1/generated.proto +++ b/staging/src/k8s.io/api/apps/v1/generated.proto @@ -513,7 +513,6 @@ message RollingUpdateDaemonSet { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } diff --git a/staging/src/k8s.io/api/apps/v1/types.go b/staging/src/k8s.io/api/apps/v1/types.go index 34e10bf9632..c6c87c93c6e 100644 --- a/staging/src/k8s.io/api/apps/v1/types.go +++ b/staging/src/k8s.io/api/apps/v1/types.go @@ -596,7 +596,6 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } diff --git a/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 23ef0ba2616..448fb02da84 100644 --- a/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -263,7 +263,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. 
Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. 
Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/apps/v1beta2/generated.proto b/staging/src/k8s.io/api/apps/v1beta2/generated.proto index 64dad8dc1ad..6c92b5260b7 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/generated.proto +++ b/staging/src/k8s.io/api/apps/v1beta2/generated.proto @@ -519,7 +519,6 @@ message RollingUpdateDaemonSet { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } diff --git a/staging/src/k8s.io/api/apps/v1beta2/types.go b/staging/src/k8s.io/api/apps/v1beta2/types.go index aeb707407c6..10f437d6a96 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/types.go +++ b/staging/src/k8s.io/api/apps/v1beta2/types.go @@ -648,7 +648,6 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } diff --git a/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index e3d3a7e5b62..3bc4af4c99f 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -263,7 +263,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. 
Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {