kubernetes/pkg/kubelet/container/runtime_cache_test.go
Yu-Ju Hong c075719f05 Kubelet: fix the runtime cache to not cache the stale pods
If a pod worker sees stale pods from the runtime cache that were retrieved
before their last sync finished, it may think that the pods were not started
correctly, and attempt to fix that by killing/restarting containers.
There are two issues that may cause the runtime cache to store stale pods:
  1. The timestamp is recorded *after* getting the pods from the container
     runtime. This may lead the consumer to think the pods are newer than they
     actually are.
  2. The cache updates are triggered by many goroutines (pod workers, and the
     updating thread). There is no mechanism to enforce that the cache is
     only updated to newer pods.

This change fixes the above two issues by making sure the timestamp is always
recorded before getting pods from the container runtime, and that the cached
pods are updated only if the timestamp is newer.
2015-05-05 18:28:38 -07:00
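
The fix described above boils down to two rules: take the timestamp before
asking the runtime for pods, and only overwrite the cache when that timestamp
is newer than the one already stored. Below is a minimal, self-contained
sketch of that pattern; the names here (cachesketch, podCache, fetch) are
illustrative stand-ins and not the kubelet's actual runtimeCache API.

// Package cachesketch is a hypothetical package name for this illustration.
package cachesketch

import (
	"sync"
	"time"
)

// podCache is an illustrative stand-in for the kubelet's runtimeCache.
type podCache struct {
	sync.Mutex
	pods      []string                 // stand-in for []*Pod
	cacheTime time.Time                // when the cached pods were fetched
	fetch     func() ([]string, error) // stand-in for the pods getter
}

// update records the timestamp *before* fetching (issue 1) and only
// overwrites the cache when that timestamp is newer than what is already
// cached, so a slower, staler fetch cannot roll the cache back (issue 2).
func (c *podCache) update() error {
	timestamp := time.Now()
	pods, err := c.fetch()
	if err != nil {
		return err
	}
	c.Lock()
	defer c.Unlock()
	if timestamp.After(c.cacheTime) {
		c.pods, c.cacheTime = pods, timestamp
	}
	return nil
}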


/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package container

import (
	"reflect"
	"testing"
	"time"
)
// testRuntimeCache embeds runtimeCache with some additional methods for
// testing.
type testRuntimeCache struct {
	runtimeCache
}

func (r *testRuntimeCache) updateCacheWithLock() error {
	r.Lock()
	defer r.Unlock()
	return r.updateCache()
}

func (r *testRuntimeCache) getCachedPods() []*Pod {
	r.Lock()
	defer r.Unlock()
	return r.pods
}

func newTestRuntimeCache(getter podsGetter) *testRuntimeCache {
	c, _ := NewRuntimeCache(getter)
	return &testRuntimeCache{*c.(*runtimeCache)}
}
func TestGetPods(t *testing.T) {
	runtime := &FakeRuntime{}
	expected := []*Pod{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}}
	runtime.Podlist = expected
	cache := newTestRuntimeCache(runtime)

	actual, err := cache.GetPods()
	if err != nil {
		t.Errorf("unexpected error %v", err)
	}
	if !reflect.DeepEqual(expected, actual) {
		t.Errorf("expected %#v, got %#v", expected, actual)
	}
}
func TestForceUpdateIfOlder(t *testing.T) {
	runtime := &FakeRuntime{}
	cache := newTestRuntimeCache(runtime)

	// Cache old pods.
	oldpods := []*Pod{{ID: "1111"}}
	runtime.Podlist = oldpods
	cache.updateCacheWithLock()

	// Update the runtime to new pods.
	newpods := []*Pod{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}}
	runtime.Podlist = newpods

	// An older timestamp should not force an update.
	cache.ForceUpdateIfOlder(time.Now().Add(-20 * time.Minute))
	actual := cache.getCachedPods()
	if !reflect.DeepEqual(oldpods, actual) {
		t.Errorf("expected %#v, got %#v", oldpods, actual)
	}

	// A newer timestamp should force an update.
	cache.ForceUpdateIfOlder(time.Now().Add(20 * time.Second))
	actual = cache.getCachedPods()
	if !reflect.DeepEqual(newpods, actual) {
		t.Errorf("expected %#v, got %#v", newpods, actual)
	}
}
func TestUpdatePodsOnlyIfNewer(t *testing.T) {
	runtime := &FakeRuntime{}
	cache := newTestRuntimeCache(runtime)

	// Cache new pods with a future timestamp.
	newpods := []*Pod{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}}
	cache.Lock()
	cache.pods = newpods
	cache.cacheTime = time.Now().Add(20 * time.Minute)
	cache.Unlock()

	// Instruct the runtime to return a list of old pods.
	oldpods := []*Pod{{ID: "1111"}}
	runtime.Podlist = oldpods

	// Try to update the cache; the attempt should not succeed because the
	// cache timestamp is newer than the current time.
	cache.updateCacheWithLock()
	actual := cache.getCachedPods()
	if !reflect.DeepEqual(newpods, actual) {
		t.Errorf("expected %#v, got %#v", newpods, actual)
	}
}