Fixup incorrect use of DefaultFeatureGate.Set in tests
commit 4dca07ef7e (parent d440ecdd3b)
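This commit replaces ad-hoc mutation of the global feature-gate registry in tests (utilfeature.DefaultFeatureGate.Set / SetFromMap plus hand-rolled save-and-restore code) with the utilfeaturetesting.SetFeatureGateDuringTest helper, which snapshots the current value, applies the override, and returns a restore func to defer. A minimal sketch of the adopted pattern, assuming the helper signature shown in the diffs below; TestMyFeature and features.MyFeature are illustrative placeholders, not code from this commit:

    import (
        "testing"

        utilfeature "k8s.io/apiserver/pkg/util/feature"
        utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"

        "k8s.io/kubernetes/pkg/features"
    )

    func TestMyFeature(t *testing.T) {
        // Old style: mutate the shared gate and hope every exit path restores it:
        //   utilfeature.DefaultFeatureGate.Set("MyFeature=true")
        //
        // New style: the helper records the current value, applies the override,
        // and returns a cleanup func; deferring that func scopes the override to
        // this test even on t.Fatal or panic.
        defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MyFeature, true)()

        if !utilfeature.DefaultFeatureGate.Enabled(features.MyFeature) {
            t.Fatal("gate should be enabled for the duration of this test")
        }
    }
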
@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/features"
@@ -34,10 +35,7 @@ func TestDropAlphaProcMountType(t *testing.T) {
 	}
 
 	// Enable alpha feature ProcMountType
-	err1 := utilfeature.DefaultFeatureGate.Set("ProcMountType=true")
-	if err1 != nil {
-		t.Fatalf("Failed to enable feature gate for ProcMountType: %v", err1)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProcMountType, true)()
 
 	// now test dropping the fields - should not be dropped
 	DropDisabledAlphaFields(&psp.Spec)
@@ -51,10 +49,7 @@ func TestDropAlphaProcMountType(t *testing.T) {
 	}
 
 	// Disable alpha feature ProcMountType
-	err := utilfeature.DefaultFeatureGate.Set("ProcMountType=false")
-	if err != nil {
-		t.Fatalf("Failed to disable feature gate for ProcMountType: %v", err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProcMountType, false)()
 
 	// now test dropping the fields
 	DropDisabledAlphaFields(&psp.Spec)

@@ -24,6 +24,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/kubernetes/pkg/apis/batch"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/features"
@@ -74,14 +75,6 @@ func featureToggle(feature utilfeature.Feature) []string {
 }
 
 func TestValidateJob(t *testing.T) {
-	ttlEnabled := utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished)
-	defer func() {
-		err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TTLAfterFinished, ttlEnabled))
-		if err != nil {
-			t.Fatalf("Failed to set feature gate for %s: %v", features.TTLAfterFinished, err)
-		}
-	}()
-
 	validManualSelector := getValidManualSelector()
 	validPodTemplateSpecForManual := getValidPodTemplateSpecForManual(validManualSelector)
 	validGeneratedSelector := getValidGeneratedSelector()
@@ -231,11 +224,8 @@ func TestValidateJob(t *testing.T) {
 		},
 	}
 
-	for _, setFeature := range featureToggle(features.TTLAfterFinished) {
-		// Set error cases based on if TTLAfterFinished feature is enabled or not
-		if err := utilfeature.DefaultFeatureGate.Set(setFeature); err != nil {
-			t.Fatalf("Failed to set feature gate for %s: %v", features.TTLAfterFinished, err)
-		}
+	for _, setFeature := range []bool{true, false} {
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TTLAfterFinished, setFeature)()
 		ttlCase := "spec.ttlSecondsAfterFinished:must be greater than or equal to 0"
 		if utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished) {
 			errorCases[ttlCase] = batch.Job{

@@ -18,7 +18,6 @@ package validation
 
 import (
 	"bytes"
-	"fmt"
 	"math"
 	"reflect"
 	"strings"
@@ -784,11 +783,7 @@ func TestAlphaVolumeSnapshotDataSource(t *testing.T) {
 	}
 
 	// Enable alpha feature VolumeSnapshotDataSource
-	err := utilfeature.DefaultFeatureGate.Set("VolumeSnapshotDataSource=true")
-	if err != nil {
-		t.Errorf("Failed to enable feature gate for VolumeSnapshotDataSource: %v", err)
-		return
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSnapshotDataSource, true)()
 	for _, tc := range successTestCases {
 		if errs := ValidatePersistentVolumeClaimSpec(&tc, field.NewPath("spec")); len(errs) != 0 {
 			t.Errorf("expected success: %v", errs)
@@ -800,11 +795,7 @@ func TestAlphaVolumeSnapshotDataSource(t *testing.T) {
 		}
 	}
 	// Disable alpha feature VolumeSnapshotDataSource
-	err = utilfeature.DefaultFeatureGate.Set("VolumeSnapshotDataSource=false")
-	if err != nil {
-		t.Errorf("Failed to disable feature gate for VolumeSnapshotDataSource: %v", err)
-		return
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSnapshotDataSource, false)()
 	for _, tc := range successTestCases {
 		if errs := ValidatePersistentVolumeClaimSpec(&tc, field.NewPath("spec")); len(errs) == 0 {
 			t.Errorf("expected failure: %v", errs)
@@ -4897,8 +4888,7 @@ func TestValidateVolumeMounts(t *testing.T) {
 }
 
 func TestValidateDisabledSubpath(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set("VolumeSubpath=false")
-	defer utilfeature.DefaultFeatureGate.Set("VolumeSubpath=true")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpath, false)()
 
 	volumes := []core.Volume{
 		{Name: "abc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim1"}}},
@@ -5734,16 +5724,7 @@ func TestValidateRestartPolicy(t *testing.T) {
 }
 
 func TestValidateDNSPolicy(t *testing.T) {
-	customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS")
-	defer func() {
-		// Restoring the old value.
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil {
-			t.Errorf("Failed to restore CustomPodDNS feature gate: %v", err)
-		}
-	}()
-	if err := utilfeature.DefaultFeatureGate.Set("CustomPodDNS=true"); err != nil {
-		t.Errorf("Failed to enable CustomPodDNS feature gate: %v", err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, true)()
 
 	successCases := []core.DNSPolicy{core.DNSClusterFirst, core.DNSDefault, core.DNSPolicy(core.DNSClusterFirst), core.DNSNone}
 	for _, policy := range successCases {
@@ -5761,16 +5742,7 @@ func TestValidateDNSPolicy(t *testing.T) {
 }
 
 func TestValidatePodDNSConfig(t *testing.T) {
-	customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS")
-	defer func() {
-		// Restoring the old value.
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil {
-			t.Errorf("Failed to restore CustomPodDNS feature gate: %v", err)
-		}
-	}()
-	if err := utilfeature.DefaultFeatureGate.Set("CustomPodDNS=true"); err != nil {
-		t.Errorf("Failed to enable CustomPodDNS feature gate: %v", err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, true)()
 
 	generateTestSearchPathFunc := func(numChars int) string {
 		res := ""
@@ -5932,16 +5904,7 @@ func TestValidatePodDNSConfig(t *testing.T) {
 }
 
 func TestValidatePodReadinessGates(t *testing.T) {
-	podReadinessGatesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodReadinessGates)
-	defer func() {
-		// Restoring the old value.
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%v", features.PodReadinessGates, podReadinessGatesEnabled)); err != nil {
-			t.Errorf("Failed to restore PodReadinessGates feature gate: %v", err)
-		}
-	}()
-	if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodReadinessGates)); err != nil {
-		t.Errorf("Failed to enable PodReadinessGates feature gate: %v", err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadinessGates, true)()
 
 	successCases := []struct {
 		desc string

@@ -25,6 +25,7 @@ import (
 	extensions "k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -482,19 +483,14 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 
 func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) {
 	for _, fg := range gates {
-		func() {
-			enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
-			defer func() {
-				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
-			}()
-
-			for _, f := range []bool{true, false} {
-				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
+		for _, f := range []bool{true, false} {
+			func() {
+				defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
 				t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
-			}
-		}()
+			}()
+		}
 	}
 }
 
 func TestGetTargetNodeName(t *testing.T) {
 	testFun := func(t *testing.T) {

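The reworked forEachFeatureGate helpers (here and in the kubelet and daemonset integration tests below) invert the old structure: instead of saving the gate once and restoring it after looping over both values, each true/false iteration runs inside its own closure whose deferred restore fires before the next iteration starts. Since defer is function-scoped rather than loop-scoped, the closure is what keeps each override from leaking into the next subtest. A condensed sketch matching the shape of the diff above:

    func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) {
        for _, fg := range gates {
            for _, enabled := range []bool{true, false} {
                func() {
                    // The restore runs when this closure returns, i.e. before
                    // the next gate/value combination is exercised.
                    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, enabled)()
                    t.Run(fmt.Sprintf("%v (%t)", fg, enabled), tf)
                }()
            }
        }
    }
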
@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/tools/record"
 	kubeapi "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/features"
@@ -181,7 +182,7 @@ type podToMake struct {
 
 // TestMemoryPressure
 func TestMemoryPressure(t *testing.T) {
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := makePodWithMemoryStats
 	summaryStatsMaker := makeMemoryStats
 	podsToMake := []podToMake{
@@ -399,10 +400,9 @@ func parseQuantity(value string) resource.Quantity {
 }
 
 func TestDiskPressureNodeFs(t *testing.T) {
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{
-		string(features.LocalStorageCapacityIsolation): true,
-		string(features.PodPriority):                   true,
-	})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+
 	podMaker := makePodWithDiskStats
 	summaryStatsMaker := makeDiskStats
 	podsToMake := []podToMake{
@@ -600,7 +600,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 
 // TestMinReclaim verifies that min-reclaim works as desired.
 func TestMinReclaim(t *testing.T) {
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := makePodWithMemoryStats
 	summaryStatsMaker := makeMemoryStats
 	podsToMake := []podToMake{
@@ -739,10 +739,9 @@ func TestMinReclaim(t *testing.T) {
 }
 
 func TestNodeReclaimFuncs(t *testing.T) {
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{
-		string(features.PodPriority):                   true,
-		string(features.LocalStorageCapacityIsolation): true,
-	})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+
 	podMaker := makePodWithDiskStats
 	summaryStatsMaker := makeDiskStats
 	podsToMake := []podToMake{
@@ -918,7 +917,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 }
 
 func TestInodePressureNodeFsInodes(t *testing.T) {
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := func(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootInodes, logInodes, volumeInodes string) (*v1.Pod, statsapi.PodStats) {
 		pod := newPod(name, priority, []v1.Container{
 			newContainer(name, requests, limits),
@@ -1140,7 +1139,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 
 // TestCriticalPodsAreNotEvicted
 func TestCriticalPodsAreNotEvicted(t *testing.T) {
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := makePodWithMemoryStats
 	summaryStatsMaker := makeMemoryStats
 	podsToMake := []podToMake{
@@ -1210,7 +1209,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 	}
 
 	// Enable critical pod annotation feature gate
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.ExperimentalCriticalPodAnnotation): true})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 	// induce soft threshold
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
@@ -1255,7 +1254,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 	}
 
 	// Disable critical pod annotation feature gate
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.ExperimentalCriticalPodAnnotation): false})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, false)()
 
 	// induce memory pressure!
 	fakeClock.Step(1 * time.Minute)
@@ -1275,7 +1274,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 
 // TestAllocatableMemoryPressure
 func TestAllocatableMemoryPressure(t *testing.T) {
-	utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	podMaker := makePodWithMemoryStats
 	summaryStatsMaker := makeMemoryStats
 	podsToMake := []podToMake{

@@ -28,6 +28,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/kubernetes/pkg/features"
 	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@@ -423,7 +424,8 @@ func thresholdEqual(a evictionapi.Threshold, b evictionapi.Threshold) bool {
 }
 
 func TestOrderedByExceedsRequestMemory(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+
 	below := newPod("below-requests", -1, []v1.Container{
 		newContainer("below-requests", newResourceList("", "200Mi", ""), newResourceList("", "", "")),
 	}, nil)
@@ -450,8 +452,8 @@ func TestOrderedByExceedsRequestMemory(t *testing.T) {
 }
 
 func TestOrderedByExceedsRequestDisk(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 	below := newPod("below-requests", -1, []v1.Container{
 		newContainer("below-requests", v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("200Mi")}, newResourceList("", "", "")),
 	}, nil)
@@ -478,7 +480,7 @@ func TestOrderedByExceedsRequestDisk(t *testing.T) {
 }
 
 func TestOrderedByPriority(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	low := newPod("low-priority", -134, []v1.Container{
 		newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, nil)
@@ -501,7 +503,7 @@ func TestOrderedByPriority(t *testing.T) {
 }
 
 func TestOrderedByPriorityDisabled(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=false", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, false)()
 	low := newPod("low-priority", lowPriority, []v1.Container{
 		newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, nil)
@@ -525,7 +527,7 @@ func TestOrderedByPriorityDisabled(t *testing.T) {
 }
 
 func TestOrderedbyDisk(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 	pod1 := newPod("best-effort-high", defaultPriority, []v1.Container{
 		newContainer("best-effort-high", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, []v1.Volume{
@@ -592,7 +594,7 @@ func TestOrderedbyDisk(t *testing.T) {
 
 // Tests that we correctly ignore disk requests when the local storage feature gate is disabled.
 func TestOrderedbyDiskDisableLocalStorage(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=false", features.LocalStorageCapacityIsolation))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, false)()
 	pod1 := newPod("best-effort-high", defaultPriority, []v1.Container{
 		newContainer("best-effort-high", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, []v1.Volume{
@@ -658,8 +660,8 @@ func TestOrderedbyDiskDisableLocalStorage(t *testing.T) {
 }
 
 func TestOrderedbyInodes(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 	low := newPod("low", defaultPriority, []v1.Container{
 		newContainer("low", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, []v1.Volume{
@@ -702,8 +704,8 @@ func TestOrderedbyInodes(t *testing.T) {
 
 // TestOrderedByPriorityDisk ensures we order pods by priority and then greediest resource consumer
 func TestOrderedByPriorityDisk(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 	pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{
 		newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, []v1.Volume{
@@ -787,7 +789,7 @@ func TestOrderedByPriorityDisk(t *testing.T) {
 
 // TestOrderedByPriorityInodes ensures we order pods by priority and then greediest resource consumer
 func TestOrderedByPriorityInodes(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	pod1 := newPod("low-priority-high-usage", lowPriority, []v1.Container{
 		newContainer("low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, []v1.Volume{
@@ -880,7 +882,7 @@ func TestOrderedByMemory(t *testing.T) {
 
 // TestOrderedByPriorityMemory ensures we order by priority and then memory consumption relative to request.
 func TestOrderedByPriorityMemory(t *testing.T) {
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{
 		newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
 	}, nil)

@@ -36,6 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
@@ -2254,19 +2255,14 @@ func runVolumeManager(kubelet *Kubelet) chan struct{} {
 
 func forEachFeatureGate(t *testing.T, fs []utilfeature.Feature, tf func(t *testing.T)) {
 	for _, fg := range fs {
-		func() {
-			enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
-			defer func() {
-				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
-			}()
-
-			for _, f := range []bool{true, false} {
-				utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
+		for _, f := range []bool{true, false} {
+			func() {
+				defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
 				t.Run(fmt.Sprintf("%v(%t)", fg, f), tf)
-			}
-		}()
+			}()
+		}
 	}
 }
 
 // Sort pods by UID.
 type podsByUID []*v1.Pod

@@ -24,6 +24,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
 
@@ -67,15 +69,15 @@ func TestContainerLabels(t *testing.T) {
 
 	var tests = []struct {
 		description     string
-		featuresCreated string // Features enabled when container is created
-		featuresStatus  string // Features enabled when container status is read
+		featuresCreated bool   // Features enabled when container is created
+		featuresStatus  bool   // Features enabled when container status is read
 		typeLabel       kubecontainer.ContainerType
 		expected        *labeledContainerInfo
 	}{
 		{
 			"Debug containers disabled",
-			"DebugContainers=False",
-			"DebugContainers=False",
+			false,
+			false,
 			"ignored",
 			&labeledContainerInfo{
 				PodName: pod.Name,
@@ -87,8 +89,8 @@ func TestContainerLabels(t *testing.T) {
 		},
 		{
 			"Regular containers",
-			"DebugContainers=True",
-			"DebugContainers=True",
+			true,
+			true,
 			kubecontainer.ContainerTypeRegular,
 			&labeledContainerInfo{
 				PodName: pod.Name,
@@ -100,8 +102,8 @@ func TestContainerLabels(t *testing.T) {
 		},
 		{
 			"Init containers",
-			"DebugContainers=True",
-			"DebugContainers=True",
+			true,
+			true,
 			kubecontainer.ContainerTypeInit,
 			&labeledContainerInfo{
 				PodName: pod.Name,
@@ -113,8 +115,8 @@ func TestContainerLabels(t *testing.T) {
 		},
 		{
 			"Created without type label",
-			"DebugContainers=False",
-			"DebugContainers=True",
+			false,
+			true,
 			"ignored",
 			&labeledContainerInfo{
 				PodName: pod.Name,
@@ -126,8 +128,8 @@ func TestContainerLabels(t *testing.T) {
 		},
 		{
 			"Created with type label, subsequently disabled",
-			"DebugContainers=True",
-			"DebugContainers=False",
+			true,
+			false,
 			kubecontainer.ContainerTypeRegular,
 			&labeledContainerInfo{
 				PodName: pod.Name,
@@ -141,15 +143,16 @@ func TestContainerLabels(t *testing.T) {
 
 	// Test whether we can get right information from label
 	for _, test := range tests {
-		utilfeature.DefaultFeatureGate.Set(test.featuresCreated)
+		func() {
+			defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DebugContainers, test.featuresCreated)()
 			labels := newContainerLabels(container, pod, test.typeLabel)
-		utilfeature.DefaultFeatureGate.Set(test.featuresStatus)
+			defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DebugContainers, test.featuresStatus)()
 			containerInfo := getContainerInfoFromLabels(labels)
 			if !reflect.DeepEqual(containerInfo, test.expected) {
 				t.Errorf("%v: expected %v, got %v", test.description, test.expected, containerInfo)
 			}
+		}()
 	}
-	utilfeature.DefaultFeatureGate.Set("DebugContainers=False")
 }
 
 func TestContainerAnnotations(t *testing.T) {

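In the container-labels test above, the table's gate columns change from strings ("DebugContainers=True") to plain bools, and the loop body moves into a per-case closure so that the two overrides, one while the labels are written and one while they are read back, both unwind when the case finishes. A sketch of that two-phase toggle; runCase is a hypothetical stand-in for the loop body, not a function from the commit:

    func runCase(t *testing.T, created, status bool) {
        func() {
            // Gate value in effect while the labels are created.
            defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DebugContainers, created)()
            // ... newContainerLabels(...) runs here ...

            // Gate value in effect while the labels are read back. Deferred
            // restores run LIFO, so "status" unwinds first, then "created".
            defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DebugContainers, status)()
            // ... getContainerInfoFromLabels(...) runs here ...
        }()
    }
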
@@ -29,7 +29,9 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/kubernetes/pkg/features"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 
 	"github.com/stretchr/testify/assert"
@@ -266,14 +268,6 @@ func TestMergeDNSOptions(t *testing.T) {
 }
 
 func TestGetPodDNSType(t *testing.T) {
-	customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS")
-	defer func() {
-		// Restoring the old value.
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil {
-			t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
-		}
-	}()
-
 	recorder := record.NewFakeRecorder(20)
 	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
@@ -361,9 +355,8 @@ func TestGetPodDNSType(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", tc.customPodDNSFeatureGate)); err != nil {
-			t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
-		}
+		t.Run(tc.desc, func(t *testing.T) {
+			defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, tc.customPodDNSFeatureGate)()
 
 			if tc.hasClusterDNS {
 				configurer.clusterDNS = testClusterDNS
@@ -378,11 +371,12 @@ func TestGetPodDNSType(t *testing.T) {
 			if err == nil {
 				t.Errorf("%s: GetPodDNSType(%v) got no error, want error", tc.desc, pod)
 			}
-			continue
+				return
 			}
 			if resType != tc.expectedDNSType {
 				t.Errorf("%s: GetPodDNSType(%v)=%v, want %v", tc.desc, pod, resType, tc.expectedDNSType)
 			}
+		})
 	}
 }
 
@@ -482,14 +476,6 @@ func TestGetPodDNS(t *testing.T) {
 }
 
 func TestGetPodDNSCustom(t *testing.T) {
-	customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS")
-	defer func() {
-		// Restoring the old value.
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil {
-			t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
-		}
-	}()
-
 	recorder := record.NewFakeRecorder(20)
 	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
@@ -628,9 +614,8 @@ func TestGetPodDNSCustom(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", tc.customPodDNSFeatureGate)); err != nil {
-			t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
-		}
+		t.Run(tc.desc, func(t *testing.T) {
+			defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, tc.customPodDNSFeatureGate)()
 
 			testPod.Spec.HostNetwork = tc.hostnetwork
 			testPod.Spec.DNSConfig = tc.dnsConfig
@@ -643,6 +628,7 @@ func TestGetPodDNSCustom(t *testing.T) {
 			if !dnsConfigsAreEqual(resDNSConfig, tc.expectedDNSConfig) {
 				t.Errorf("%s: GetPodDNS(%v)=%v, want %v", tc.desc, testPod, resDNSConfig, tc.expectedDNSConfig)
 			}
+		})
 	}
 }

@@ -43,6 +43,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
         "//staging/src/k8s.io/client-go/discovery:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",

@@ -32,8 +32,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	genericfeatures "k8s.io/apiserver/pkg/features"
+	"k8s.io/apiserver/pkg/features"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/pager"
@@ -170,9 +171,7 @@ func Test202StatusCode(t *testing.T) {
 }
 
 func TestAPIListChunking(t *testing.T) {
-	if err := utilfeature.DefaultFeatureGate.Set(string(genericfeatures.APIListChunking) + "=true"); err != nil {
-		t.Fatal(err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)()
 	s, clientSet, closeFn := setup(t)
 	defer closeFn()

@@ -33,6 +33,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",

@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
@@ -485,23 +486,14 @@ func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string
 
 func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
 	for _, fg := range featureGates() {
-		func() {
-			enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
-			defer func() {
-				if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil {
-					t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled)
-				}
-			}()
-
-			for _, f := range []bool{true, false} {
-				if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil {
-					t.Fatalf("Failed to set FeatureGate %v to %t", fg, f)
-				}
+		for _, f := range []bool{true, false} {
+			func() {
+				defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
 				t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
-			}
-		}()
+			}()
+		}
 	}
 }
 
 func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy)) {
 	for _, strategy := range updateStrategies() {
@@ -704,23 +696,10 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
 	})
 }
 
-func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
-	if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
-		t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
-	}
-}
-
 // When ScheduleDaemonSetPods is disabled, DaemonSets should not launch onto nodes with insufficient capacity.
 // Look for TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled, we don't need this test anymore.
 func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-	// Rollback feature gate.
-	defer func() {
-		if enabled {
-			setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-		}
-	}()
-	setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
 	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
 		server, closeFn, dc, informers, clientset := setup(t)
 		defer closeFn()
@@ -761,17 +740,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 // feature is enabled, the DaemonSet should create Pods for all the nodes regardless of available resource
 // on the nodes, and kube-scheduler should not schedule Pods onto the nodes with insufficient resource.
 func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-	defer func() {
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
-			features.ScheduleDaemonSetPods, enabled)); err != nil {
-			t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled)
-		}
-	}()
-
-	if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil {
-		t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)()
 
 	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
 		server, closeFn, dc, informers, clientset := setup(t)
@@ -1012,16 +981,7 @@ func TestTaintedNode(t *testing.T) {
 // TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled
 // to the Unschedulable nodes when TaintNodesByCondition are enabled.
 func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
-	enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
-	defer func() {
-		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
-			features.TaintNodesByCondition, enabledTaint)); err != nil {
-			t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint)
-		}
-	}()
-	if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil {
-		t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
 
 	forEachFeatureGate(t, func(t *testing.T) {
 		forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {

@@ -54,6 +54,7 @@ go_test(
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+       "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",

@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/features"
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
@@ -64,7 +65,7 @@ func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
 // TestPreemption tests a few preemption scenarios.
 func TestPreemption(t *testing.T) {
 	// Enable PodPriority feature gate.
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler.
 	context := initTest(t, "preemption")
 	defer cleanupTest(t, context)
@@ -292,7 +293,7 @@ func TestPreemption(t *testing.T) {
 // TestDisablePreemption tests disable pod preemption of scheduler works as expected.
 func TestDisablePreemption(t *testing.T) {
 	// Enable PodPriority feature gate.
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler, and disable preemption.
 	context := initTestDisablePreemption(t, "disable-preemption")
 	defer cleanupTest(t, context)
@@ -394,7 +395,7 @@ func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace
 // after preemption and while the higher priority pods is not scheduled yet.
 func TestPreemptionStarvation(t *testing.T) {
 	// Enable PodPriority feature gate.
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler.
 	context := initTest(t, "preemption")
 	defer cleanupTest(t, context)
@@ -501,7 +502,7 @@ func TestPreemptionStarvation(t *testing.T) {
 // node name of the medium priority pod is cleared.
 func TestNominatedNodeCleanUp(t *testing.T) {
 	// Enable PodPriority feature gate.
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler.
 	context := initTest(t, "preemption")
 	defer cleanupTest(t, context)
@@ -615,7 +616,7 @@ func addPodConditionReady(pod *v1.Pod) {
 // TestPDBInPreemption tests PodDisruptionBudget support in preemption.
 func TestPDBInPreemption(t *testing.T) {
 	// Enable PodPriority feature gate.
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Initialize scheduler.
 	context := initTest(t, "preemption-pdb")
 	defer cleanupTest(t, context)

@@ -28,10 +28,12 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/controller/nodelifecycle"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
@@ -61,14 +63,8 @@ func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
 
 // TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature.
 func TestTaintNodeByCondition(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
-	defer func() {
-		if !enabled {
-			utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
-		}
-	}()
 	// Enable TaintNodeByCondition
-	utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
 
 	// Build PodToleration Admission.
 	admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})

@@ -36,10 +36,12 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
 	persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -95,11 +97,9 @@ type testPVC struct {
 }
 
 func TestVolumeBinding(t *testing.T) {
-	features := map[string]bool{
-		"VolumeScheduling":       true,
-		"PersistentLocalVolumes": true,
-	}
-	config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, true)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+	config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true)
 	defer config.teardown()
 
 	cases := map[string]struct {
@@ -268,11 +268,9 @@ func TestVolumeBinding(t *testing.T) {
 
 // TestVolumeBindingRescheduling tests scheduler will retry scheduling when needed.
 func TestVolumeBindingRescheduling(t *testing.T) {
-	features := map[string]bool{
-		"VolumeScheduling":       true,
-		"PersistentLocalVolumes": true,
-	}
-	config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, true)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+	config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true)
 	defer config.teardown()
 
 	storageClassName := "local-storage"
@@ -414,11 +412,9 @@ func TestVolumeBindingDynamicStressSlow(t *testing.T) {
 }
 
 func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, dynamic bool, provisionDelaySeconds int) {
-	features := map[string]bool{
-		"VolumeScheduling":       true,
-		"PersistentLocalVolumes": true,
-	}
-	config := setupCluster(t, "volume-binding-stress-", 1, features, schedulerResyncPeriod, provisionDelaySeconds, true)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+	config := setupCluster(t, "volume-binding-stress-", 1, schedulerResyncPeriod, provisionDelaySeconds, true)
 	defer config.teardown()
 
 	// Set max volume limit to the number of PVCs the test will create
@@ -491,12 +487,10 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration,
 }
 
 func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, numPVsFirstNode int) {
-	features := map[string]bool{
-		"VolumeScheduling":       true,
-		"PersistentLocalVolumes": true,
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
 	// TODO: disable equivalence cache until kubernetes/kubernetes#67680 is fixed
-	config := setupCluster(t, "volume-pod-affinity-", numNodes, features, 0, 0, true)
+	config := setupCluster(t, "volume-pod-affinity-", numNodes, 0, 0, true)
 	defer config.teardown()
 
 	pods := []*v1.Pod{}
@@ -621,11 +615,9 @@ func TestVolumeBindingWithAffinity(t *testing.T) {
 }
 
 func TestPVAffinityConflict(t *testing.T) {
-	features := map[string]bool{
-		"VolumeScheduling":       true,
-		"PersistentLocalVolumes": true,
-	}
-	config := setupCluster(t, "volume-scheduling-", 3, features, 0, 0, true)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+	config := setupCluster(t, "volume-scheduling-", 3, 0, 0, true)
 	defer config.teardown()
 
 	pv := makePV("local-pv", classImmediate, "", "", node1)
@@ -684,11 +676,9 @@ func TestPVAffinityConflict(t *testing.T) {
 }
 
 func TestVolumeProvision(t *testing.T) {
-	features := map[string]bool{
-		"VolumeScheduling":       true,
-		"PersistentLocalVolumes": true,
-	}
-	config := setupCluster(t, "volume-scheduling", 1, features, 0, 0, true)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+	config := setupCluster(t, "volume-scheduling", 1, 0, 0, true)
 	defer config.teardown()
 
 	cases := map[string]struct {
@@ -825,15 +815,8 @@ func TestVolumeProvision(t *testing.T) {
 // selectedNode annotation from a claim to reschedule volume provision
 // on provision failure.
 func TestRescheduleProvisioning(t *testing.T) {
-	features := map[string]bool{
-		"VolumeScheduling": true,
-	}
-	oldFeatures := make(map[string]bool, len(features))
-	for feature := range features {
-		oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature))
-	}
-	// Set feature gates
-	utilfeature.DefaultFeatureGate.SetFromMap(features)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+
 	controllerCh := make(chan struct{})
 
 	context := initTestMaster(t, "reschedule-volume-provision", nil)
@@ -846,8 +829,6 @@ func TestRescheduleProvisioning(t *testing.T) {
 		deleteTestObjects(clientset, ns, nil)
 		context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
 		context.closeFn()
-		// Restore feature gates
-		utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures)
 	}()
 
 	ctrl, informerFactory, err := initPVController(context, 0)
@@ -893,14 +874,7 @@ func TestRescheduleProvisioning(t *testing.T) {
 	}
 }
 
-func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[string]bool, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
-	oldFeatures := make(map[string]bool, len(features))
-	for feature := range features {
-		oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature))
-	}
-	// Set feature gates
-	utilfeature.DefaultFeatureGate.SetFromMap(features)
-
+func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
 	context := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), false, nil, false, disableEquivalenceCache, resyncPeriod)
 
 	clientset := context.clientSet
@@ -938,8 +912,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[s
 		teardown: func() {
 			deleteTestObjects(clientset, ns, nil)
 			cleanupTest(t, context)
-			// Restore feature gates
-			utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures)
 		},
 	}
 }

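With the gate handling moved to the deferred helper, setupCluster no longer needs its features map parameter or the save/restore bookkeeping in teardown; callers pin the gates themselves before building the cluster. A sketch of the new caller-side contract, with a hypothetical test name standing in for the tests changed above:

    func TestSomeVolumeScenario(t *testing.T) {
        // Pin the gates for the duration of the test; restoration is automatic
        // when the deferred cleanup funcs run.
        defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
        defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()

        // setupCluster's signature drops the features map (see the diff above).
        config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true)
        defer config.teardown()
        // ... exercise scheduling behavior ...
    }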