Remove balanced attached node volumes

kubernetes#60525 introduced the BalanceAttachedNodeVolumes feature gate
so that the number of volumes attached to a node is taken into account
when prioritizing nodes. The gate was added because it was useful in
the Red Hat OpenShift Online environment, which is no longer in use.
This commit therefore removes the gate, which also improves the
maintainability of the scheduler code base, as noted in
kubernetes#101489 (comment).
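For context, a simplified sketch (not the scheduler's actual plugin code; all names below are illustrative) of what "include volume count for prioritizing nodes" meant for balanced resource allocation: with the gate on, the attached-volume utilization fraction joined CPU and memory in a variance calculation, and lower variance meant a higher node score; with the gate removed, only the CPU/memory spread matters.

package main

import (
	"fmt"
	"math"
)

// balancedScore is an illustrative sketch of the balanced-allocation scoring
// idea, not the real NodeResourcesBalancedAllocation implementation. With the
// gate enabled, the volume fraction is folded into a variance across the three
// utilization fractions; lower variance yields a higher score in 0..100.
func balancedScore(cpuFraction, memFraction, volumeFraction float64, gateEnabled bool) int64 {
	const maxNodeScore = 100.0
	if gateEnabled {
		mean := (cpuFraction + memFraction + volumeFraction) / 3
		variance := ((cpuFraction-mean)*(cpuFraction-mean) +
			(memFraction-mean)*(memFraction-mean) +
			(volumeFraction-mean)*(volumeFraction-mean)) / 3
		return int64((1 - variance) * maxNodeScore)
	}
	// Without the gate, only the CPU/memory spread is considered.
	return int64((1 - math.Abs(cpuFraction-memFraction)) * maxNodeScore)
}

func main() {
	// A node whose CPU and memory are balanced but whose volume slots are
	// nearly full scores lower only while the gate is enabled.
	fmt.Println(balancedScore(0.5, 0.5, 0.9, true))  // 96
	fmt.Println(balancedScore(0.5, 0.5, 0.9, false)) // 100
}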
ravisantoshgudimetla committed 2021-06-22 11:16:32 -04:00
parent 6dd9deea3d
commit b6c75bee15
14 changed files with 29 additions and 321 deletions
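Every hunk below strips the same two-line pattern from a test: the gate was switched on for the duration of the test via the component-base feature-gate testing helper, whose returned restore function the test deferred. A minimal illustration of that pre-removal pattern (the test name is hypothetical; the helper, gate, and imports are the ones referenced in the diff):

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestWithBalancedVolumesEnabled(t *testing.T) {
	// Flip the gate on for this test only; the returned closure restores
	// the previous value when the deferred call runs.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
	// ... exercise volume-aware cache/scoring behavior here ...
}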


@@ -28,9 +28,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@@ -95,8 +92,6 @@ func newNodeInfo(requestedResource *framework.Resource,
// TestAssumePodScheduled tests that after a pod is assumed, its information is aggregated
// on node level.
func TestAssumePodScheduled(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node"
testPods := []*v1.Pod{
makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
@@ -248,8 +243,6 @@ func assumeAndFinishBinding(cache *schedulerCache, pod *v1.Pod, assumedTime time
// TestExpirePod tests that assumed pods will be removed if expired.
// The removal will be reflected in node info.
func TestExpirePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node"
testPods := []*v1.Pod{
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
@@ -321,8 +314,6 @@ func TestExpirePod(t *testing.T) {
// TestAddPodWillConfirm tests that a pod being Add()ed will be confirmed if assumed.
// The pod info should still exist after manually expiring unconfirmed pods.
func TestAddPodWillConfirm(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node"
now := time.Now()
ttl := 10 * time.Second
@@ -492,8 +483,6 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
// TestAddPodAfterExpiration tests that a pod being Add()ed will be added back if expired.
func TestAddPodAfterExpiration(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node"
ttl := 10 * time.Second
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
@@ -544,8 +533,6 @@ func TestAddPodAfterExpiration(t *testing.T) {
// TestUpdatePod tests that a pod will be updated if added before.
func TestUpdatePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node"
ttl := 10 * time.Second
testPods := []*v1.Pod{
@@ -676,8 +663,6 @@ func TestUpdatePodAndGet(t *testing.T) {
// TestExpireAddUpdatePod test the sequence that a pod is expired, added, then updated
func TestExpireAddUpdatePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node"
ttl := 10 * time.Second
testPods := []*v1.Pod{
@@ -777,8 +762,6 @@ func makePodWithEphemeralStorage(nodeName, ephemeralStorage string) *v1.Pod {
}
func TestEphemeralStorageResource(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node"
podE := makePodWithEphemeralStorage(nodeName, "500")
tests := []struct {
@@ -824,8 +807,6 @@ func TestEphemeralStorageResource(t *testing.T) {
// TestRemovePod tests after added pod is removed, its information should also be subtracted.
func TestRemovePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
basePod := makeBasePod(t, "node-1", "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
tests := []struct {
nodes []*v1.Node
@@ -1699,8 +1680,6 @@ func TestSchedulerCache_updateNodeInfoSnapshotList(t *testing.T) {
}
func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(nil, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
cache := setupCacheOf1kNodes30kPods(b)
b.ResetTimer()
for n := 0; n < b.N; n++ {