refactor: rename schedulerCache to cacheImpl in internal cache
Signed-off-by: kerthcet <kerthcet@gmail.com>
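For context (not part of this diff, which only touches cache_test.go): the concrete type behind the package's exported Cache interface is declared in cache.go of the same package. Below is a minimal, hypothetical sketch of what the corresponding rename there looks like, assuming only the type name changes while the newSchedulerCache constructor keeps its name; the field set shown is illustrative, not the real declaration.

package cache

import (
	"sync"
	"time"
)

// cacheImpl is the renamed concrete scheduler cache (formerly schedulerCache).
// Sketch only: the fields below are illustrative, not the actual struct.
type cacheImpl struct {
	mu     sync.RWMutex  // guards the cached state
	ttl    time.Duration // how long an assumed pod is kept before expiring
	period time.Duration // interval of the background cleanup loop
}

// Compile-time assertion that the renamed type still satisfies the package's
// exported Cache interface, so interface callers are unaffected by the rename.
var _ Cache = &cacheImpl{}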
diff --git a/pkg/scheduler/internal/cache/cache_test.go b/pkg/scheduler/internal/cache/cache_test.go
--- a/pkg/scheduler/internal/cache/cache_test.go
+++ b/pkg/scheduler/internal/cache/cache_test.go
@@ -233,7 +233,7 @@ type testExpirePodStruct struct {
 	assumedTime time.Time
 }
 
-func assumeAndFinishBinding(cache *schedulerCache, pod *v1.Pod, assumedTime time.Time) error {
+func assumeAndFinishBinding(cache *cacheImpl, pod *v1.Pod, assumedTime time.Time) error {
 	if err := cache.AssumePod(pod); err != nil {
 		return err
 	}
@@ -1087,7 +1087,7 @@ func TestNodeOperators(t *testing.T) {
 			// Generations are globally unique. We check in our unit tests that they are incremented correctly.
 			expected.Generation = got.info.Generation
 			if !reflect.DeepEqual(got.info, expected) {
-				t.Errorf("Failed to add node into schedulercache:\n got: %+v \nexpected: %+v", got, expected)
+				t.Errorf("Failed to add node into scheduler cache:\n got: %+v \nexpected: %+v", got, expected)
 			}
 
 			// Step 2: dump cached nodes successfully.
@@ -1239,7 +1239,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
 		podsWithAffinity = append(podsWithAffinity, pod)
 	}
 
-	var cache *schedulerCache
+	var cache *cacheImpl
 	var snapshot *Snapshot
 	type operation = func(t *testing.T)
 
@@ -1487,7 +1487,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
 	}
 }
 
-func compareCacheWithNodeInfoSnapshot(t *testing.T, cache *schedulerCache, snapshot *Snapshot) error {
+func compareCacheWithNodeInfoSnapshot(t *testing.T, cache *cacheImpl, snapshot *Snapshot) error {
 	// Compare the map.
 	if len(snapshot.nodeInfoMap) != cache.nodeTree.numNodes {
 		return fmt.Errorf("unexpected number of nodes in the snapshot. Expected: %v, got: %v", cache.nodeTree.numNodes, len(snapshot.nodeInfoMap))
@@ -1561,7 +1561,7 @@ func TestSchedulerCache_updateNodeInfoSnapshotList(t *testing.T) {
 		}
 	}
 
-	var cache *schedulerCache
+	var cache *cacheImpl
 	var snapshot *Snapshot
 
 	addNode := func(t *testing.T, i int) {
@@ -1770,7 +1770,7 @@ func setupCacheOf1kNodes30kPods(b *testing.B) Cache {
 	return cache
 }
 
-func setupCacheWithAssumedPods(b *testing.B, podNum int, assumedTime time.Time) *schedulerCache {
+func setupCacheWithAssumedPods(b *testing.B, podNum int, assumedTime time.Time) *cacheImpl {
 	cache := newSchedulerCache(time.Second, time.Second, nil)
 	for i := 0; i < podNum; i++ {
 		nodeName := fmt.Sprintf("node-%d", i/10)
@@ -1785,7 +1785,7 @@ func setupCacheWithAssumedPods(b *testing.B, podNum int, assumedTime time.Time)
 	return cache
 }
 
-func isForgottenFromCache(p *v1.Pod, c *schedulerCache) error {
+func isForgottenFromCache(p *v1.Pod, c *cacheImpl) error {
 	if assumed, err := c.IsAssumedPod(p); err != nil {
 		return err
 	} else if assumed {
@@ -1798,7 +1798,7 @@ func isForgottenFromCache(p *v1.Pod, c *schedulerCache) error {
 }
 
 // getNodeInfo returns cached data for the node name.
-func (cache *schedulerCache) getNodeInfo(nodeName string) (*v1.Node, error) {
+func (cache *cacheImpl) getNodeInfo(nodeName string) (*v1.Node, error) {
 	cache.mu.RLock()
 	defer cache.mu.RUnlock()
 