Fixup incorrect use of DefaultFeatureGate.Set in tests

Jordan Liggitt 2018-11-21 00:25:58 -05:00
parent d440ecdd3b
commit 4dca07ef7e
17 changed files with 157 additions and 296 deletions
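The pattern applied across every file below: tests had been flipping gates with utilfeature.DefaultFeatureGate.Set (or SetFromMap) and hand-rolling the restore logic, which leaks the override into later tests whenever the restore is missing or wrong. The utilfeaturetesting.SetFeatureGateDuringTest helper records the current value, applies the override, and returns a function that puts the old value back, so a single deferred call scopes the gate change to the test. A minimal sketch of the usage, with the package name, test name, and the PodPriority gate chosen here only for illustration:

package kubefeatures_test

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestWithPodPriorityEnabled(t *testing.T) {
	// SetFeatureGateDuringTest sets the gate to the requested value and returns
	// a func that restores the previous value; deferring that func keeps the
	// override from leaking into other tests, even if this test fails early.
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()

	// ... assertions that rely on the PodPriority gate being enabled ...
}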

View File

@@ -20,6 +20,7 @@ import (
 "testing"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/apis/policy"
 "k8s.io/kubernetes/pkg/features"
@@ -34,10 +35,7 @@ func TestDropAlphaProcMountType(t *testing.T) {
 }
 // Enable alpha feature ProcMountType
-err1 := utilfeature.DefaultFeatureGate.Set("ProcMountType=true")
-if err1 != nil {
-t.Fatalf("Failed to enable feature gate for ProcMountType: %v", err1)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProcMountType, true)()
 // now test dropping the fields - should not be dropped
 DropDisabledAlphaFields(&psp.Spec)
@@ -51,10 +49,7 @@ func TestDropAlphaProcMountType(t *testing.T) {
 }
 // Disable alpha feature ProcMountType
-err := utilfeature.DefaultFeatureGate.Set("ProcMountType=false")
-if err != nil {
-t.Fatalf("Failed to disable feature gate for ProcMountType: %v", err)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProcMountType, false)()
 // now test dropping the fields
 DropDisabledAlphaFields(&psp.Spec)

View File

@@ -24,6 +24,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/kubernetes/pkg/apis/batch"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/features"
@@ -74,14 +75,6 @@ func featureToggle(feature utilfeature.Feature) []string {
 }
 func TestValidateJob(t *testing.T) {
-ttlEnabled := utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished)
-defer func() {
-err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TTLAfterFinished, ttlEnabled))
-if err != nil {
-t.Fatalf("Failed to set feature gate for %s: %v", features.TTLAfterFinished, err)
-}
-}()
 validManualSelector := getValidManualSelector()
 validPodTemplateSpecForManual := getValidPodTemplateSpecForManual(validManualSelector)
 validGeneratedSelector := getValidGeneratedSelector()
@@ -231,11 +224,8 @@ func TestValidateJob(t *testing.T) {
 },
 }
-for _, setFeature := range featureToggle(features.TTLAfterFinished) {
-// Set error cases based on if TTLAfterFinished feature is enabled or not
-if err := utilfeature.DefaultFeatureGate.Set(setFeature); err != nil {
-t.Fatalf("Failed to set feature gate for %s: %v", features.TTLAfterFinished, err)
-}
+for _, setFeature := range []bool{true, false} {
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TTLAfterFinished, setFeature)()
 ttlCase := "spec.ttlSecondsAfterFinished:must be greater than or equal to 0"
 if utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished) {
 errorCases[ttlCase] = batch.Job{

View File

@@ -18,7 +18,6 @@ package validation
 import (
 "bytes"
-"fmt"
 "math"
 "reflect"
 "strings"
@@ -784,11 +783,7 @@ func TestAlphaVolumeSnapshotDataSource(t *testing.T) {
 }
 // Enable alpha feature VolumeSnapshotDataSource
-err := utilfeature.DefaultFeatureGate.Set("VolumeSnapshotDataSource=true")
-if err != nil {
-t.Errorf("Failed to enable feature gate for VolumeSnapshotDataSource: %v", err)
-return
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSnapshotDataSource, true)()
 for _, tc := range successTestCases {
 if errs := ValidatePersistentVolumeClaimSpec(&tc, field.NewPath("spec")); len(errs) != 0 {
 t.Errorf("expected success: %v", errs)
@@ -800,11 +795,7 @@ func TestAlphaVolumeSnapshotDataSource(t *testing.T) {
 }
 }
 // Disable alpha feature VolumeSnapshotDataSource
-err = utilfeature.DefaultFeatureGate.Set("VolumeSnapshotDataSource=false")
-if err != nil {
-t.Errorf("Failed to disable feature gate for VolumeSnapshotDataSource: %v", err)
-return
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSnapshotDataSource, false)()
 for _, tc := range successTestCases {
 if errs := ValidatePersistentVolumeClaimSpec(&tc, field.NewPath("spec")); len(errs) == 0 {
 t.Errorf("expected failure: %v", errs)
@@ -4897,8 +4888,7 @@ func TestValidateVolumeMounts(t *testing.T) {
 }
 func TestValidateDisabledSubpath(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set("VolumeSubpath=false")
-defer utilfeature.DefaultFeatureGate.Set("VolumeSubpath=true")
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpath, false)()
 volumes := []core.Volume{
 {Name: "abc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim1"}}},
@@ -5734,16 +5724,7 @@ func TestValidateRestartPolicy(t *testing.T) {
 }
 func TestValidateDNSPolicy(t *testing.T) {
-customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS")
-defer func() {
-// Restoring the old value.
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil {
-t.Errorf("Failed to restore CustomPodDNS feature gate: %v", err)
-}
-}()
-if err := utilfeature.DefaultFeatureGate.Set("CustomPodDNS=true"); err != nil {
-t.Errorf("Failed to enable CustomPodDNS feature gate: %v", err)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, true)()
 successCases := []core.DNSPolicy{core.DNSClusterFirst, core.DNSDefault, core.DNSPolicy(core.DNSClusterFirst), core.DNSNone}
 for _, policy := range successCases {
@@ -5761,16 +5742,7 @@ func TestValidateDNSPolicy(t *testing.T) {
 }
 func TestValidatePodDNSConfig(t *testing.T) {
-customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS")
-defer func() {
-// Restoring the old value.
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil {
-t.Errorf("Failed to restore CustomPodDNS feature gate: %v", err)
-}
-}()
-if err := utilfeature.DefaultFeatureGate.Set("CustomPodDNS=true"); err != nil {
-t.Errorf("Failed to enable CustomPodDNS feature gate: %v", err)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, true)()
 generateTestSearchPathFunc := func(numChars int) string {
 res := ""
@@ -5932,16 +5904,7 @@ func TestValidatePodDNSConfig(t *testing.T) {
 }
 func TestValidatePodReadinessGates(t *testing.T) {
-podReadinessGatesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodReadinessGates)
-defer func() {
-// Restoring the old value.
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%v", features.PodReadinessGates, podReadinessGatesEnabled)); err != nil {
-t.Errorf("Failed to restore PodReadinessGates feature gate: %v", err)
-}
-}()
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodReadinessGates)); err != nil {
-t.Errorf("Failed to enable PodReadinessGates feature gate: %v", err)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadinessGates, true)()
 successCases := []struct {
 desc string

View File

@@ -25,6 +25,7 @@ import (
 extensions "k8s.io/api/extensions/v1beta1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/kubernetes/pkg/api/testapi"
 "k8s.io/kubernetes/pkg/features"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -482,17 +483,12 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
 func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) {
 for _, fg := range gates {
-func() {
-enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
-defer func() {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
-}()
-for _, f := range []bool{true, false} {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
+for _, f := range []bool{true, false} {
+func() {
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
 t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
-}
-}()
+}()
+}
 }
 }
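For helpers like forEachFeatureGate above that exercise both values of a gate, the refactor moves the loop over true/false outside the closure and wraps each iteration in an anonymous function, so the deferred restore from SetFeatureGateDuringTest fires at the end of that iteration rather than at the end of the whole test. The new shape of the helper, reassembled from the hunk above (the fmt and testing imports come from the surrounding file):

func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) {
	for _, fg := range gates {
		for _, f := range []bool{true, false} {
			func() {
				// Restore the original value of fg as soon as this iteration returns.
				defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
				t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
			}()
		}
	}
}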

View File

@@ -26,6 +26,7 @@ import (
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/clock"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/client-go/tools/record"
 kubeapi "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/features"
@@ -181,7 +182,7 @@ type podToMake struct {
 // TestMemoryPressure
 func TestMemoryPressure(t *testing.T) {
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 podMaker := makePodWithMemoryStats
 summaryStatsMaker := makeMemoryStats
 podsToMake := []podToMake{
@@ -399,10 +400,9 @@ func parseQuantity(value string) resource.Quantity {
 }
 func TestDiskPressureNodeFs(t *testing.T) {
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{
-string(features.LocalStorageCapacityIsolation): true,
-string(features.PodPriority): true,
-})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 podMaker := makePodWithDiskStats
 summaryStatsMaker := makeDiskStats
 podsToMake := []podToMake{
@@ -600,7 +600,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 // TestMinReclaim verifies that min-reclaim works as desired.
 func TestMinReclaim(t *testing.T) {
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 podMaker := makePodWithMemoryStats
 summaryStatsMaker := makeMemoryStats
 podsToMake := []podToMake{
@@ -739,10 +739,9 @@ func TestMinReclaim(t *testing.T) {
 }
 func TestNodeReclaimFuncs(t *testing.T) {
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{
-string(features.PodPriority): true,
-string(features.LocalStorageCapacityIsolation): true,
-})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 podMaker := makePodWithDiskStats
 summaryStatsMaker := makeDiskStats
 podsToMake := []podToMake{
@@ -918,7 +917,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 }
 func TestInodePressureNodeFsInodes(t *testing.T) {
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 podMaker := func(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootInodes, logInodes, volumeInodes string) (*v1.Pod, statsapi.PodStats) {
 pod := newPod(name, priority, []v1.Container{
 newContainer(name, requests, limits),
@@ -1140,7 +1139,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 // TestCriticalPodsAreNotEvicted
 func TestCriticalPodsAreNotEvicted(t *testing.T) {
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 podMaker := makePodWithMemoryStats
 summaryStatsMaker := makeMemoryStats
 podsToMake := []podToMake{
@@ -1210,7 +1209,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 }
 // Enable critical pod annotation feature gate
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.ExperimentalCriticalPodAnnotation): true})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 // induce soft threshold
 fakeClock.Step(1 * time.Minute)
 summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
@@ -1255,7 +1254,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 }
 // Disable critical pod annotation feature gate
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.ExperimentalCriticalPodAnnotation): false})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, false)()
 // induce memory pressure!
 fakeClock.Step(1 * time.Minute)
@@ -1275,7 +1274,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 // TestAllocatableMemoryPressure
 func TestAllocatableMemoryPressure(t *testing.T) {
-utilfeature.DefaultFeatureGate.SetFromMap(map[string]bool{string(features.PodPriority): true})
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 podMaker := makePodWithMemoryStats
 summaryStatsMaker := makeMemoryStats
 podsToMake := []podToMake{

View File

@@ -28,6 +28,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/kubernetes/pkg/features"
 statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@@ -423,7 +424,8 @@ func thresholdEqual(a evictionapi.Threshold, b evictionapi.Threshold) bool {
 }
 func TestOrderedByExceedsRequestMemory(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 below := newPod("below-requests", -1, []v1.Container{
 newContainer("below-requests", newResourceList("", "200Mi", ""), newResourceList("", "", "")),
 }, nil)
@@ -450,8 +452,8 @@ func TestOrderedByExceedsRequestMemory(t *testing.T) {
 }
 func TestOrderedByExceedsRequestDisk(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 below := newPod("below-requests", -1, []v1.Container{
 newContainer("below-requests", v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("200Mi")}, newResourceList("", "", "")),
 }, nil)
@@ -478,7 +480,7 @@ func TestOrderedByExceedsRequestDisk(t *testing.T) {
 }
 func TestOrderedByPriority(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 low := newPod("low-priority", -134, []v1.Container{
 newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")),
 }, nil)
@@ -501,7 +503,7 @@ func TestOrderedByPriority(t *testing.T) {
 }
 func TestOrderedByPriorityDisabled(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=false", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, false)()
 low := newPod("low-priority", lowPriority, []v1.Container{
 newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")),
 }, nil)
@@ -525,7 +527,7 @@ func TestOrderedByPriorityDisabled(t *testing.T) {
 }
 func TestOrderedbyDisk(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 pod1 := newPod("best-effort-high", defaultPriority, []v1.Container{
 newContainer("best-effort-high", newResourceList("", "", ""), newResourceList("", "", "")),
 }, []v1.Volume{
@@ -592,7 +594,7 @@ func TestOrderedbyDisk(t *testing.T) {
 // Tests that we correctly ignore disk requests when the local storage feature gate is disabled.
 func TestOrderedbyDiskDisableLocalStorage(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=false", features.LocalStorageCapacityIsolation))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, false)()
 pod1 := newPod("best-effort-high", defaultPriority, []v1.Container{
 newContainer("best-effort-high", newResourceList("", "", ""), newResourceList("", "", "")),
 }, []v1.Volume{
@@ -658,8 +660,8 @@ func TestOrderedbyDiskDisableLocalStorage(t *testing.T) {
 }
 func TestOrderedbyInodes(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 low := newPod("low", defaultPriority, []v1.Container{
 newContainer("low", newResourceList("", "", ""), newResourceList("", "", "")),
 }, []v1.Volume{
@@ -702,8 +704,8 @@ func TestOrderedbyInodes(t *testing.T) {
 // TestOrderedByPriorityDisk ensures we order pods by priority and then greediest resource consumer
 func TestOrderedByPriorityDisk(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.LocalStorageCapacityIsolation))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
 pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{
 newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
 }, []v1.Volume{
@@ -787,7 +789,7 @@ func TestOrderedByPriorityDisk(t *testing.T) {
 // TestOrderedByPriorityInodes ensures we order pods by priority and then greediest resource consumer
 func TestOrderedByPriorityInodes(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 pod1 := newPod("low-priority-high-usage", lowPriority, []v1.Container{
 newContainer("low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
 }, []v1.Volume{
@@ -880,7 +882,7 @@ func TestOrderedByMemory(t *testing.T) {
 // TestOrderedByPriorityMemory ensures we order by priority and then memory consumption relative to request.
 func TestOrderedByPriorityMemory(t *testing.T) {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{
 newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
 }, nil)

View File

@@ -36,6 +36,7 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/client-go/kubernetes/fake"
 "k8s.io/client-go/tools/record"
 "k8s.io/client-go/util/flowcontrol"
@@ -2254,17 +2255,12 @@ func runVolumeManager(kubelet *Kubelet) chan struct{} {
 func forEachFeatureGate(t *testing.T, fs []utilfeature.Feature, tf func(t *testing.T)) {
 for _, fg := range fs {
-func() {
-enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
-defer func() {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
-}()
-for _, f := range []bool{true, false} {
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
+for _, f := range []bool{true, false} {
+func() {
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
 t.Run(fmt.Sprintf("%v(%t)", fg, f), tf)
-}
-}()
+}()
+}
 }
 }

View File

@@ -24,6 +24,8 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/intstr"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+"k8s.io/kubernetes/pkg/features"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
@@ -67,15 +69,15 @@ func TestContainerLabels(t *testing.T) {
 var tests = []struct {
 description string
-featuresCreated string // Features enabled when container is created
-featuresStatus string // Features enabled when container status is read
+featuresCreated bool // Features enabled when container is created
+featuresStatus bool // Features enabled when container status is read
 typeLabel kubecontainer.ContainerType
 expected *labeledContainerInfo
 }{
 {
 "Debug containers disabled",
-"DebugContainers=False",
-"DebugContainers=False",
+false,
+false,
 "ignored",
 &labeledContainerInfo{
 PodName: pod.Name,
@@ -87,8 +89,8 @@ func TestContainerLabels(t *testing.T) {
 },
 {
 "Regular containers",
-"DebugContainers=True",
-"DebugContainers=True",
+true,
+true,
 kubecontainer.ContainerTypeRegular,
 &labeledContainerInfo{
 PodName: pod.Name,
@@ -100,8 +102,8 @@ func TestContainerLabels(t *testing.T) {
 },
 {
 "Init containers",
-"DebugContainers=True",
-"DebugContainers=True",
+true,
+true,
 kubecontainer.ContainerTypeInit,
 &labeledContainerInfo{
 PodName: pod.Name,
@@ -113,8 +115,8 @@ func TestContainerLabels(t *testing.T) {
 },
 {
 "Created without type label",
-"DebugContainers=False",
-"DebugContainers=True",
+false,
+true,
 "ignored",
 &labeledContainerInfo{
 PodName: pod.Name,
@@ -126,8 +128,8 @@ func TestContainerLabels(t *testing.T) {
 },
 {
 "Created with type label, subsequently disabled",
-"DebugContainers=True",
-"DebugContainers=False",
+true,
+false,
 kubecontainer.ContainerTypeRegular,
 &labeledContainerInfo{
 PodName: pod.Name,
@@ -141,15 +143,16 @@ func TestContainerLabels(t *testing.T) {
 // Test whether we can get right information from label
 for _, test := range tests {
-utilfeature.DefaultFeatureGate.Set(test.featuresCreated)
-labels := newContainerLabels(container, pod, test.typeLabel)
-utilfeature.DefaultFeatureGate.Set(test.featuresStatus)
-containerInfo := getContainerInfoFromLabels(labels)
-if !reflect.DeepEqual(containerInfo, test.expected) {
-t.Errorf("%v: expected %v, got %v", test.description, test.expected, containerInfo)
-}
+func() {
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DebugContainers, test.featuresCreated)()
+labels := newContainerLabels(container, pod, test.typeLabel)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DebugContainers, test.featuresStatus)()
+containerInfo := getContainerInfoFromLabels(labels)
+if !reflect.DeepEqual(containerInfo, test.expected) {
+t.Errorf("%v: expected %v, got %v", test.description, test.expected, containerInfo)
+}
+}()
 }
-utilfeature.DefaultFeatureGate.Set("DebugContainers=False")
 }
 func TestContainerAnnotations(t *testing.T) {

View File

@@ -29,7 +29,9 @@ import (
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/sets"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/client-go/tools/record"
+"k8s.io/kubernetes/pkg/features"
 runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 "github.com/stretchr/testify/assert"
@@ -266,14 +268,6 @@ func TestMergeDNSOptions(t *testing.T) {
 }
 func TestGetPodDNSType(t *testing.T) {
-customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS")
-defer func() {
-// Restoring the old value.
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil {
-t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
-}
-}()
 recorder := record.NewFakeRecorder(20)
 nodeRef := &v1.ObjectReference{
 Kind: "Node",
@@ -361,28 +355,28 @@ func TestGetPodDNSType(t *testing.T) {
 }
 for _, tc := range testCases {
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", tc.customPodDNSFeatureGate)); err != nil {
-t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
-}
+t.Run(tc.desc, func(t *testing.T) {
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, tc.customPodDNSFeatureGate)()
 if tc.hasClusterDNS {
 configurer.clusterDNS = testClusterDNS
 } else {
 configurer.clusterDNS = nil
 }
 pod.Spec.DNSPolicy = tc.dnsPolicy
 pod.Spec.HostNetwork = tc.hostNetwork
 resType, err := getPodDNSType(pod)
 if tc.expectedError {
 if err == nil {
 t.Errorf("%s: GetPodDNSType(%v) got no error, want error", tc.desc, pod)
 }
-continue
+return
 }
 if resType != tc.expectedDNSType {
 t.Errorf("%s: GetPodDNSType(%v)=%v, want %v", tc.desc, pod, resType, tc.expectedDNSType)
 }
+})
 }
 }
@@ -482,14 +476,6 @@ func TestGetPodDNS(t *testing.T) {
 }
 func TestGetPodDNSCustom(t *testing.T) {
-customDNSEnabled := utilfeature.DefaultFeatureGate.Enabled("CustomPodDNS")
-defer func() {
-// Restoring the old value.
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", customDNSEnabled)); err != nil {
-t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
-}
-}()
 recorder := record.NewFakeRecorder(20)
 nodeRef := &v1.ObjectReference{
 Kind: "Node",
@@ -628,21 +614,21 @@
 }
 for _, tc := range testCases {
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("CustomPodDNS=%v", tc.customPodDNSFeatureGate)); err != nil {
-t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
-}
+t.Run(tc.desc, func(t *testing.T) {
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CustomPodDNS, tc.customPodDNSFeatureGate)()
 testPod.Spec.HostNetwork = tc.hostnetwork
 testPod.Spec.DNSConfig = tc.dnsConfig
 testPod.Spec.DNSPolicy = tc.dnsPolicy
 resDNSConfig, err := configurer.GetPodDNS(testPod)
 if err != nil {
 t.Errorf("%s: GetPodDNS(%v), unexpected error: %v", tc.desc, testPod, err)
 }
 if !dnsConfigsAreEqual(resDNSConfig, tc.expectedDNSConfig) {
 t.Errorf("%s: GetPodDNS(%v)=%v, want %v", tc.desc, testPod, resDNSConfig, tc.expectedDNSConfig)
 }
+})
 }
 }

View File

@@ -43,6 +43,7 @@ go_test(
 "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
 "//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
 "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
 "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
 "//staging/src/k8s.io/client-go/discovery:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",

View File

@@ -32,8 +32,9 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
-genericfeatures "k8s.io/apiserver/pkg/features"
+"k8s.io/apiserver/pkg/features"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 clientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/pager"
@@ -170,9 +171,7 @@
 }
 func TestAPIListChunking(t *testing.T) {
-if err := utilfeature.DefaultFeatureGate.Set(string(genericfeatures.APIListChunking) + "=true"); err != nil {
-t.Fatal(err)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)()
 s, clientSet, closeFn := setup(t)
 defer closeFn()

View File

@@ -33,6 +33,7 @@ go_test(
 "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
 "//staging/src/k8s.io/client-go/informers:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",

View File

@@ -31,6 +31,7 @@ import (
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/client-go/informers"
 clientset "k8s.io/client-go/kubernetes"
 appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
@@ -485,21 +486,12 @@ func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string
 func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
 for _, fg := range featureGates() {
-func() {
-enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
-defer func() {
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil {
-t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled)
-}
-}()
-for _, f := range []bool{true, false} {
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil {
-t.Fatalf("Failed to set FeatureGate %v to %t", fg, f)
-}
+for _, f := range []bool{true, false} {
+func() {
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
 t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
-}
-}()
+}()
+}
 }
 }
@@ -704,23 +696,10 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
 })
 }
-func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
-t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
-}
-}
 // When ScheduleDaemonSetPods is disabled, DaemonSets should not launch onto nodes with insufficient capacity.
 // Look for TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled, we don't need this test anymore.
 func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-// Rollback feature gate.
-defer func() {
-if enabled {
-setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-}
-}()
-setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
 forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
 server, closeFn, dc, informers, clientset := setup(t)
 defer closeFn()
@@ -761,17 +740,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 // feature is enabled, the DaemonSet should create Pods for all the nodes regardless of available resource
 // on the nodes, and kube-scheduler should not schedule Pods onto the nodes with insufficient resource.
 func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-defer func() {
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
-features.ScheduleDaemonSetPods, enabled)); err != nil {
-t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled)
-}
-}()
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil {
-t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)()
 forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
 server, closeFn, dc, informers, clientset := setup(t)
@@ -1012,16 +981,7 @@ func TestTaintedNode(t *testing.T) {
 // TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled
 // to the Unschedulable nodes when TaintNodesByCondition are enabled.
 func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
-enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
-defer func() {
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
-features.TaintNodesByCondition, enabledTaint)); err != nil {
-t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint)
-}
-}()
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil {
-t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
 forEachFeatureGate(t, func(t *testing.T) {
 forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {

View File

@@ -54,6 +54,7 @@ go_test(
 "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
 "//staging/src/k8s.io/client-go/informers:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",

View File

@@ -31,6 +31,7 @@ import (
 "k8s.io/apimachinery/pkg/util/intstr"
 "k8s.io/apimachinery/pkg/util/wait"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/pkg/features"
 _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
@@ -64,7 +65,7 @@ func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod) error {
 // TestPreemption tests a few preemption scenarios.
 func TestPreemption(t *testing.T) {
 // Enable PodPriority feature gate.
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 // Initialize scheduler.
 context := initTest(t, "preemption")
 defer cleanupTest(t, context)
@@ -292,7 +293,7 @@ func TestPreemption(t *testing.T) {
 // TestDisablePreemption tests disable pod preemption of scheduler works as expected.
 func TestDisablePreemption(t *testing.T) {
 // Enable PodPriority feature gate.
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 // Initialize scheduler, and disable preemption.
 context := initTestDisablePreemption(t, "disable-preemption")
 defer cleanupTest(t, context)
@@ -394,7 +395,7 @@ func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace
 // after preemption and while the higher priority pods is not scheduled yet.
 func TestPreemptionStarvation(t *testing.T) {
 // Enable PodPriority feature gate.
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 // Initialize scheduler.
 context := initTest(t, "preemption")
 defer cleanupTest(t, context)
@@ -501,7 +502,7 @@ func TestPreemptionStarvation(t *testing.T) {
 // node name of the medium priority pod is cleared.
 func TestNominatedNodeCleanUp(t *testing.T) {
 // Enable PodPriority feature gate.
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 // Initialize scheduler.
 context := initTest(t, "preemption")
 defer cleanupTest(t, context)
@@ -615,7 +616,7 @@ func addPodConditionReady(pod *v1.Pod) {
 // TestPDBInPreemption tests PodDisruptionBudget support in preemption.
 func TestPDBInPreemption(t *testing.T) {
 // Enable PodPriority feature gate.
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 // Initialize scheduler.
 context := initTest(t, "preemption-pdb")
 defer cleanupTest(t, context)

View File

@@ -28,10 +28,12 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/client-go/informers"
 "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 "k8s.io/kubernetes/pkg/controller/nodelifecycle"
+"k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
@@ -61,14 +63,8 @@ func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
 // TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature.
 func TestTaintNodeByCondition(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
-defer func() {
-if !enabled {
-utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
-}
-}()
 // Enable TaintNodeByCondition
-utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
 // Build PodToleration Admission.
 admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})

View File

@@ -36,10 +36,12 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/client-go/informers"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
 persistentvolumeoptions "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
+"k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 "k8s.io/kubernetes/pkg/volume"
 volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -95,11 +97,9 @@ type testPVC struct {
 }
 func TestVolumeBinding(t *testing.T) {
-features := map[string]bool{
-"VolumeScheduling": true,
-"PersistentLocalVolumes": true,
-}
-config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true)
 defer config.teardown()
 cases := map[string]struct {
@@ -268,11 +268,9 @@ func TestVolumeBinding(t *testing.T) {
 // TestVolumeBindingRescheduling tests scheduler will retry scheduling when needed.
 func TestVolumeBindingRescheduling(t *testing.T) {
-features := map[string]bool{
-"VolumeScheduling": true,
-"PersistentLocalVolumes": true,
-}
-config := setupCluster(t, "volume-scheduling-", 2, features, 0, 0, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+config := setupCluster(t, "volume-scheduling-", 2, 0, 0, true)
 defer config.teardown()
 storageClassName := "local-storage"
@@ -414,11 +412,9 @@ func TestVolumeBindingDynamicStressSlow(t *testing.T) {
 }
 func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, dynamic bool, provisionDelaySeconds int) {
-features := map[string]bool{
-"VolumeScheduling": true,
-"PersistentLocalVolumes": true,
-}
-config := setupCluster(t, "volume-binding-stress-", 1, features, schedulerResyncPeriod, provisionDelaySeconds, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+config := setupCluster(t, "volume-binding-stress-", 1, schedulerResyncPeriod, provisionDelaySeconds, true)
 defer config.teardown()
 // Set max volume limit to the number of PVCs the test will create
@@ -491,12 +487,10 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration,
 }
 func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, numPVsFirstNode int) {
-features := map[string]bool{
-"VolumeScheduling": true,
-"PersistentLocalVolumes": true,
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
 // TODO: disable equivalence cache until kubernetes/kubernetes#67680 is fixed
-config := setupCluster(t, "volume-pod-affinity-", numNodes, features, 0, 0, true)
+config := setupCluster(t, "volume-pod-affinity-", numNodes, 0, 0, true)
 defer config.teardown()
 pods := []*v1.Pod{}
@@ -621,11 +615,9 @@ func TestVolumeBindingWithAffinity(t *testing.T) {
 }
 func TestPVAffinityConflict(t *testing.T) {
-features := map[string]bool{
-"VolumeScheduling": true,
-"PersistentLocalVolumes": true,
-}
-config := setupCluster(t, "volume-scheduling-", 3, features, 0, 0, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+config := setupCluster(t, "volume-scheduling-", 3, 0, 0, true)
 defer config.teardown()
 pv := makePV("local-pv", classImmediate, "", "", node1)
@@ -684,11 +676,9 @@ func TestPVAffinityConflict(t *testing.T) {
 }
 func TestVolumeProvision(t *testing.T) {
-features := map[string]bool{
-"VolumeScheduling": true,
-"PersistentLocalVolumes": true,
-}
-config := setupCluster(t, "volume-scheduling", 1, features, 0, 0, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PersistentLocalVolumes, true)()
+config := setupCluster(t, "volume-scheduling", 1, 0, 0, true)
 defer config.teardown()
 cases := map[string]struct {
@@ -825,15 +815,8 @@ func TestVolumeProvision(t *testing.T) {
 // selectedNode annotation from a claim to reschedule volume provision
 // on provision failure.
 func TestRescheduleProvisioning(t *testing.T) {
-features := map[string]bool{
-"VolumeScheduling": true,
-}
-oldFeatures := make(map[string]bool, len(features))
-for feature := range features {
-oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature))
-}
 // Set feature gates
-utilfeature.DefaultFeatureGate.SetFromMap(features)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, true)()
 controllerCh := make(chan struct{})
 context := initTestMaster(t, "reschedule-volume-provision", nil)
@@ -846,8 +829,6 @@ func TestRescheduleProvisioning(t *testing.T) {
 deleteTestObjects(clientset, ns, nil)
 context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
 context.closeFn()
-// Restore feature gates
-utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures)
 }()
 ctrl, informerFactory, err := initPVController(context, 0)
@@ -893,14 +874,7 @@ func TestRescheduleProvisioning(t *testing.T) {
 }
 }
-func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[string]bool, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
-oldFeatures := make(map[string]bool, len(features))
-for feature := range features {
-oldFeatures[feature] = utilfeature.DefaultFeatureGate.Enabled(utilfeature.Feature(feature))
-}
-// Set feature gates
-utilfeature.DefaultFeatureGate.SetFromMap(features)
+func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int, disableEquivalenceCache bool) *testConfig {
 context := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), false, nil, false, disableEquivalenceCache, resyncPeriod)
 clientset := context.clientSet
@@ -938,8 +912,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, features map[s
 teardown: func() {
 deleteTestObjects(clientset, ns, nil)
 cleanupTest(t, context)
-// Restore feature gates
-utilfeature.DefaultFeatureGate.SetFromMap(oldFeatures)
 },
 }
 }