Merge pull request #125461 from mimowo/pod-disruption-conditions-ga

Graduate PodDisruptionConditions to stable
Kubernetes Prow Robot authored 2024-07-09 11:08:13 -07:00; committed by GitHub
19 changed files with 404 additions and 720 deletions
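
With the gate locked on, the components touched by this commit (kubelet eviction, graceful node shutdown, PodGC, taint eviction, scheduler preemption, the eviction API) set the DisruptionTarget pod condition unconditionally. A minimal sketch of how a consumer checks for it, using the same podutil.GetPodCondition helper the tests below rely on; the status literal and the reason string are illustrative assumptions:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	// Hypothetical pod status carrying the condition that disruption
	// initiators now always set.
	status := v1.PodStatus{
		Conditions: []v1.PodCondition{
			{Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "EvictionByEvictionAPI"},
		},
	}
	// GetPodCondition returns the index and a pointer to the condition,
	// or -1 and nil when it is absent.
	if _, cond := podutil.GetPodCondition(&status, v1.DisruptionTarget); cond != nil {
		fmt.Printf("pod is a disruption target: %s\n", cond.Reason)
	}
}
```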

View File

@@ -82,9 +82,6 @@ var (
// TODO: document the feature (owning SIG, when to use this feature for a test)
OOMScoreAdj = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("OOMScoreAdj"))
- // TODO: document the feature (owning SIG, when to use this feature for a test)
- PodDisruptionConditions = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodDisruptionConditions"))
// TODO: document the feature (owning SIG, when to use this feature for a test)
PodResources = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodResources"))

View File

@@ -91,7 +91,7 @@ var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisrupt
}
})
f.It("should add DisruptionTarget condition to the preempted pod", nodefeature.PodDisruptionConditions, func(ctx context.Context) {
f.It("should add DisruptionTarget condition to the preempted pod", func(ctx context.Context) {
// because the Priority admission plugin is enabled, the Pod is rejected if the priority class is not found.
node := getNodeName(ctx, f)
nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{

View File

@@ -31,7 +31,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/eviction"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@@ -45,6 +44,7 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/ptr"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -513,23 +513,19 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)
})
- f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; PodDisruptionConditions enabled", nodefeature.PodDisruptionConditions, func() {
+ f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; baseline scenario to verify DisruptionTarget is added", func() {
tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
pidsConsumed := int64(10000)
summary := eventuallyGetSummary(ctx)
availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses)
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
initialConfig.EvictionMinimumReclaim = map[string]string{}
- initialConfig.FeatureGates = map[string]bool{
- string(features.PodDisruptionConditions): true,
- }
})
- disruptionTarget := v1.DisruptionTarget
specs := []podEvictSpec{
{
evictionPriority: 1,
pod: pidConsumingPod("fork-bomb-container", 30000),
wantPodDisruptionCondition: &disruptionTarget,
wantPodDisruptionCondition: ptr.To(v1.DisruptionTarget),
},
}
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)

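The switch from the named disruptionTarget variable to ptr.To matches the newly added k8s.io/utils/ptr import above: ptr.To(v) returns a *T for any value, so no throwaway variable is needed just to take an address. A self-contained sketch of the two patterns; podEvictSpec here is a stand-in that mirrors only the field the test uses, not the real type:

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
)

// Stand-in for the e2e test's podEvictSpec; only the relevant field.
type podEvictSpec struct {
	wantPodDisruptionCondition *v1.PodConditionType
}

func main() {
	// Before: a named variable exists only so its address can be taken.
	disruptionTarget := v1.DisruptionTarget
	_ = podEvictSpec{wantPodDisruptionCondition: &disruptionTarget}

	// After: the generic helper builds the pointer inline.
	_ = podEvictSpec{wantPodDisruptionCondition: ptr.To(v1.DisruptionTarget)}
}
```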
View File

@@ -83,7 +83,7 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
}
})
f.Context("graceful node shutdown when PodDisruptionConditions are enabled", nodefeature.PodDisruptionConditions, func() {
f.Context("graceful node shutdown; baseline scenario to verify DisruptionTarget is added", func() {
const (
pollInterval = 1 * time.Second
@@ -95,7 +95,6 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
initialConfig.FeatureGates = map[string]bool{
string(features.GracefulNodeShutdown): true,
- string(features.PodDisruptionConditions): true,
string(features.GracefulNodeShutdownBasedOnPodPriority): false,
}
initialConfig.ShutdownGracePeriod = metav1.Duration{Duration: nodeShutdownGracePeriod}

View File

@@ -40,7 +40,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/util/feature"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
@@ -50,12 +49,10 @@ import (
"k8s.io/client-go/restmapper"
"k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
- featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/disruption"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/kubernetes/test/utils/ktesting"
)
@@ -346,36 +343,22 @@ func TestEvictionVersions(t *testing.T) {
// TestEvictionWithFinalizers tests eviction with the use of finalizers
func TestEvictionWithFinalizers(t *testing.T) {
cases := map[string]struct {
- enablePodDisruptionConditions bool
- phase v1.PodPhase
- dryRun bool
- wantDisruptionTargetCond bool
+ phase v1.PodPhase
+ dryRun bool
+ wantDisruptionTargetCond bool
}{
"terminal pod with PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
phase: v1.PodSucceeded,
wantDisruptionTargetCond: true,
"terminal pod": {
phase: v1.PodSucceeded,
wantDisruptionTargetCond: true,
},
"terminal pod with PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
phase: v1.PodSucceeded,
wantDisruptionTargetCond: false,
"running pod": {
phase: v1.PodRunning,
wantDisruptionTargetCond: true,
},
"running pod with PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
phase: v1.PodRunning,
wantDisruptionTargetCond: true,
},
"running pod with PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
phase: v1.PodRunning,
wantDisruptionTargetCond: false,
},
"running pod with PodDisruptionConditions enabled should not update conditions in dry-run mode": {
enablePodDisruptionConditions: true,
phase: v1.PodRunning,
dryRun: true,
wantDisruptionTargetCond: false,
"running pod should not update conditions in dry-run mode": {
phase: v1.PodRunning,
dryRun: true,
wantDisruptionTargetCond: false,
},
}
for name, tc := range cases {
@@ -386,7 +369,6 @@ func TestEvictionWithFinalizers(t *testing.T) {
ns := framework.CreateNamespaceOrDie(clientSet, "eviction-with-finalizers", t)
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
- featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)
defer tCtx.Cancel("test has completed")
informers.Start(tCtx.Done())

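The deleted featuregatetesting.SetFeatureGateDuringTest call is the standard way integration tests flip a mutable (alpha/beta) gate for the duration of one test. Once a gate graduates to GA and is locked to its default, overriding it to a different value fails the test, which is why every such call for PodDisruptionConditions is dropped in this commit. A sketch of the surviving pattern, shown with the SeparateTaintEvictionController gate that the taint test below still toggles:

```go
package example

import (
	"testing"

	"k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestWithMutableGate(t *testing.T) {
	// Overrides the gate for this test only and restores the previous
	// value on cleanup. Valid for alpha/beta gates; a gate that is GA
	// and locked to true, like PodDisruptionConditions after this
	// commit, can no longer be forced off this way.
	featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate,
		features.SeparateTaintEvictionController, true)
}
```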
View File

@@ -51,31 +51,21 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
nodeIndex := 1 // the exact node doesn't matter, pick one
tests := map[string]struct {
- enablePodDisruptionConditions bool
enableSeparateTaintEvictionController bool
startStandaloneTaintEvictionController bool
wantPodEvicted bool
}{
"Test eviction for NoExecute taint added by user; pod condition added when PodDisruptionConditions enabled; separate taint eviction controller disabled": {
enablePodDisruptionConditions: true,
enableSeparateTaintEvictionController: false,
startStandaloneTaintEvictionController: false,
wantPodEvicted: true,
},
"Test eviction for NoExecute taint added by user; no pod condition added when PodDisruptionConditions disabled; separate taint eviction controller disabled": {
enablePodDisruptionConditions: false,
"Test eviction for NoExecute taint added by user; pod condition added; separate taint eviction controller disabled": {
enableSeparateTaintEvictionController: false,
startStandaloneTaintEvictionController: false,
wantPodEvicted: true,
},
"Test eviction for NoExecute taint added by user; separate taint eviction controller enabled but not started": {
- enablePodDisruptionConditions: false,
enableSeparateTaintEvictionController: true,
startStandaloneTaintEvictionController: false,
wantPodEvicted: false,
},
"Test eviction for NoExecute taint added by user; separate taint eviction controller enabled and started": {
- enablePodDisruptionConditions: false,
enableSeparateTaintEvictionController: true,
startStandaloneTaintEvictionController: true,
wantPodEvicted: true,
@@ -124,7 +114,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
},
}
- featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.SeparateTaintEvictionController, test.enableSeparateTaintEvictionController)
testCtx := testutils.InitTestAPIServer(t, "taint-no-execute", nil)
cs := testCtx.ClientSet
@@ -202,9 +191,9 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
t.Fatalf("Test Failed: error: %q, while getting updated pod", err)
}
_, cond := podutil.GetPodCondition(&testPod.Status, v1.DisruptionTarget)
- if test.enablePodDisruptionConditions && cond == nil {
+ if test.wantPodEvicted && cond == nil {
t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
- } else if !test.enablePodDisruptionConditions && cond != nil {
+ } else if !test.wantPodEvicted && cond != nil {
t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
}
})

View File

@@ -40,16 +40,14 @@ import (
// TestPodGcOrphanedPodsWithFinalizer tests deletion of orphaned pods
func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
tests := map[string]struct {
- enablePodDisruptionConditions bool
enableJobPodReplacementPolicy bool
phase v1.PodPhase
wantPhase v1.PodPhase
wantDisruptionTarget *v1.PodCondition
}{
"PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
phase: v1.PodPending,
wantPhase: v1.PodFailed,
"pending pod": {
phase: v1.PodPending,
wantPhase: v1.PodFailed,
wantDisruptionTarget: &v1.PodCondition{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
@@ -57,8 +55,7 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
Message: "PodGC: node no longer exists",
},
},
"PodDisruptionConditions and PodReplacementPolicy enabled": {
enablePodDisruptionConditions: true,
"pending pod; PodReplacementPolicy enabled": {
enableJobPodReplacementPolicy: true,
phase: v1.PodPending,
wantPhase: v1.PodFailed,
@@ -69,32 +66,18 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
Message: "PodGC: node no longer exists",
},
},
"Only PodReplacementPolicy enabled; no PodDisruptionCondition": {
enablePodDisruptionConditions: false,
enableJobPodReplacementPolicy: true,
phase: v1.PodPending,
wantPhase: v1.PodFailed,
"succeeded pod": {
phase: v1.PodSucceeded,
wantPhase: v1.PodSucceeded,
},
"PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
phase: v1.PodPending,
wantPhase: v1.PodPending,
},
"PodDisruptionConditions enabled; succeeded pod": {
enablePodDisruptionConditions: true,
phase: v1.PodSucceeded,
wantPhase: v1.PodSucceeded,
},
"PodDisruptionConditions enabled; failed pod": {
enablePodDisruptionConditions: true,
phase: v1.PodFailed,
wantPhase: v1.PodFailed,
"failed pod": {
phase: v1.PodFailed,
wantPhase: v1.PodFailed,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
- featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
testCtx := setup(t, "podgc-orphaned")
cs := testCtx.ClientSet
@@ -170,31 +153,18 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
// TestTerminatingOnOutOfServiceNode tests deletion pods terminating on out-of-service nodes
func TestTerminatingOnOutOfServiceNode(t *testing.T) {
tests := map[string]struct {
- enablePodDisruptionConditions bool
enableJobPodReplacementPolicy bool
withFinalizer bool
wantPhase v1.PodPhase
}{
"pod has phase changed to Failed when PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
withFinalizer: true,
wantPhase: v1.PodFailed,
"pod has phase changed to Failed": {
withFinalizer: true,
wantPhase: v1.PodFailed,
},
"pod has phase unchanged when PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
withFinalizer: true,
wantPhase: v1.PodPending,
"pod is getting deleted when no finalizer": {
withFinalizer: false,
},
"pod is getting deleted when no finalizer and PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
withFinalizer: false,
},
"pod is getting deleted when no finalizer and PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
withFinalizer: false,
},
"pod has phase changed when PodDisruptionConditions disabled, but JobPodReplacementPolicy enabled": {
enablePodDisruptionConditions: false,
"pod has phase changed when JobPodReplacementPolicy enabled": {
enableJobPodReplacementPolicy: true,
withFinalizer: true,
wantPhase: v1.PodFailed,
@@ -203,7 +173,6 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
- featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeOutOfServiceVolumeDetach, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
testCtx := setup(t, "podgc-out-of-service")
@@ -385,7 +354,6 @@ func TestPodGcForPodsWithDuplicatedFieldKeys(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
- featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)
testCtx := setup(t, "podgc-orphaned")
cs := testCtx.ClientSet

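The remaining PodGC cases still compare a wantDisruptionTarget literal against the live pod's condition. The actual comparison helper sits outside this diff; one hedged way to write such a check is with go-cmp, ignoring the volatile timestamp fields (go-cmp usage here is an assumption, not necessarily what the test does):

```go
package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	v1 "k8s.io/api/core/v1"
)

func assertDisruptionTarget(t *testing.T, got, want *v1.PodCondition) {
	t.Helper()
	// Compare Type/Status/Reason/Message; skip the timestamp fields,
	// which differ on every run.
	if diff := cmp.Diff(want, got,
		cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
		t.Errorf("unexpected DisruptionTarget condition (-want +got):\n%s", diff)
	}
}
```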
View File

@@ -33,17 +33,14 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
- featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
configv1 "k8s.io/kube-scheduler/config/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -200,41 +197,14 @@ func TestPreemption(t *testing.T) {
maxTokens := 1000
tests := []struct {
- name string
- existingPods []*v1.Pod
- pod *v1.Pod
- initTokens int
- enablePreFilter bool
- unresolvable bool
- preemptedPodIndexes map[int]struct{}
- enablePodDisruptionConditions bool
+ name string
+ existingPods []*v1.Pod
+ pod *v1.Pod
+ initTokens int
+ enablePreFilter bool
+ unresolvable bool
+ preemptedPodIndexes map[int]struct{}
}{
- {
- name: "basic pod preemption with PodDisruptionConditions enabled",
- initTokens: maxTokens,
- existingPods: []*v1.Pod{
- initPausePod(&testutils.PausePodConfig{
- Name: "victim-pod",
- Namespace: testCtx.NS.Name,
- Priority: &lowPriority,
- Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
- },
- }),
- },
- pod: initPausePod(&testutils.PausePodConfig{
- Name: "preemptor-pod",
- Namespace: testCtx.NS.Name,
- Priority: &highPriority,
- Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
- },
- }),
- preemptedPodIndexes: map[int]struct{}{0: {}},
- enablePodDisruptionConditions: true,
- },
{
name: "basic pod preemption",
initTokens: maxTokens,
@@ -484,7 +454,6 @@ func TestPreemption(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
filter.Tokens = test.initTokens
filter.EnablePreFilter = test.enablePreFilter
filter.Unresolvable = test.unresolvable
@@ -513,10 +482,8 @@ func TestPreemption(t *testing.T) {
t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, p.Name)
}
_, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
- if test.enablePodDisruptionConditions && cond == nil {
+ if cond == nil {
t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
- } else if test.enablePodDisruptionConditions == false && cond != nil {
- t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
}
} else {
if p.DeletionTimestamp != nil {