Migrate to k8s.io/utils/clock in pkg/controller

wojtekt
2021-09-09 16:28:21 +02:00
parent 7a0638da76
commit e233feb99b
9 changed files with 25 additions and 24 deletions
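The pattern repeated across the nine files: the deprecated k8s.io/apimachinery/pkg/util/clock package is replaced by k8s.io/utils/clock in production code and by k8s.io/utils/clock/testing for fake clocks in tests. A minimal sketch of the resulting usage (illustrative names, not taken from the diff):

package example

import (
	"time"

	"k8s.io/utils/clock"                      // interface types; was k8s.io/apimachinery/pkg/util/clock
	testingclock "k8s.io/utils/clock/testing" // fake implementations now live here
)

// Production code depends only on the clock.Clock interface.
func expired(c clock.Clock, deadline time.Time) bool {
	return c.Now().After(deadline)
}

// Tests inject a fake clock and advance it deterministically.
func demo() bool {
	fake := testingclock.NewFakeClock(time.Now())
	fake.Step(time.Hour) // move the fake "now" forward without sleeping
	return expired(fake, time.Now())
}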

pkg/controller/certificates/signer/signer_test.go

@@ -30,7 +30,6 @@ import (
"github.com/google/go-cmp/cmp"
capi "k8s.io/api/certificates/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes/fake"
@@ -41,10 +40,11 @@ import (
capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1"
"k8s.io/kubernetes/pkg/controller/certificates"
"k8s.io/kubernetes/pkg/features"
testingclock "k8s.io/utils/clock/testing"
)
func TestSigner(t *testing.T) {
- fakeClock := clock.FakeClock{}
+ fakeClock := testingclock.FakeClock{}
s, err := newSigner("kubernetes.io/legacy-unknown", "./testdata/ca.crt", "./testdata/ca.key", nil, 1*time.Hour)
if err != nil {

pkg/controller/controller_utils.go

@@ -35,7 +35,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -52,6 +51,7 @@ import (
"k8s.io/kubernetes/pkg/features"
hashutil "k8s.io/kubernetes/pkg/util/hash"
taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/utils/clock"
"k8s.io/utils/integer"
"k8s.io/klog/v2"

pkg/controller/daemon/daemon_controller_test.go

@@ -31,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -55,6 +54,7 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/securitycontext"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
testingclock "k8s.io/utils/clock/testing"
)
var (
@@ -309,7 +309,7 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
clientset,
- flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+ flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
)
if err != nil {
return nil, nil, nil, err
@@ -473,7 +473,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
f.Core().V1().Pods(),
f.Core().V1().Nodes(),
client,
- flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+ flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
)
if err != nil {
t.Fatal(err)
@@ -3411,7 +3411,7 @@ func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
addNodes(manager.nodeStore, 0, 5, nil)
// the clock will be set 10s after the newest pod on node-1 went ready, which is not long enough to be available
- manager.DaemonSetsController.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(50+10, 0))
+ manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+10, 0))
// will be preserved because it has the newest hash
pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
@@ -3456,7 +3456,7 @@ func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
addNodes(manager.nodeStore, 0, 5, nil)
// the clock will be set 20s after the newest pod on node-1 went ready, which is not long enough to be available
- manager.DaemonSetsController.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(50+20, 0))
+ manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+20, 0))
// will be preserved because it has the newest hash
pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
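The comments in the two hunks above hinge on the failedPodsBackoff clock: pinning it to an absolute Unix time positions "now" relative to pod-ready timestamps without sleeping. A rough sketch of the mechanism these tests drive (the key and durations are illustrative, not from the diff):

package example

import (
	"time"

	"k8s.io/client-go/util/flowcontrol"
	testingclock "k8s.io/utils/clock/testing"
)

func demo() {
	// Same construction as in newTestController above: a backoff manager
	// driven entirely by a fake clock.
	fakeClock := testingclock.NewFakeClock(time.Now())
	backoff := flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, fakeClock)

	backoff.Next("node-1", fakeClock.Now()) // record a failure for a key
	fakeClock.Step(100 * time.Millisecond)  // "wait out" the backoff instantly
	_ = backoff.Get("node-1")               // current backoff delay for the key
}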

pkg/controller/daemon/update_test.go

@@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -32,6 +31,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/daemon/util"
"k8s.io/kubernetes/pkg/features"
testingclock "k8s.io/utils/clock/testing"
)
func TestDaemonSetUpdatesPods(t *testing.T) {
@@ -205,12 +205,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
manager.dsStore.Update(ds)
// all old pods are unavailable so should be surged
- manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(100, 0))
+ manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(100, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
// waiting for pods to go ready, old pods are deleted
- manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(200, 0))
+ manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(200, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)
@@ -219,7 +219,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
ds.Spec.Template.Spec.Containers[0].Image = "foo3/bar3"
manager.dsStore.Update(ds)
- manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(300, 0))
+ manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(300, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)
@@ -243,12 +243,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
// the new pods should still be considered waiting to hit min readiness, so one pod should be created to replace
// the deleted old pod
- manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(310, 0))
+ manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(310, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
// the new pods are now considered available, so delete the old pods
- manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(320, 0))
+ manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(320, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 1, 3, 0)
@@ -259,12 +259,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
})
// the new pods are now considered available, so delete the old pods
- manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(340, 0))
+ manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(340, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)
// controller has completed upgrade
- manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(350, 0))
+ manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(350, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
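Rather than stepping one shared clock, the test above swaps in a fresh fake clock pinned to an absolute instant between syncs, which keeps min-readiness comparisons against pod ready times explicit. A sketch of the two equivalent ways to move a fake clock (illustrative only, not from the diff):

package example

import (
	"time"

	testingclock "k8s.io/utils/clock/testing"
)

func demo() {
	// Pin "now" to an absolute instant, as the test does between syncs.
	c := testingclock.NewFakeClock(time.Unix(300, 0))

	// Equivalent alternatives for advancing it:
	c.SetTime(time.Unix(310, 0)) // jump to an absolute time
	c.Step(10 * time.Second)     // or advance relatively
}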

pkg/controller/nodelifecycle/scheduler/timed_workers.go

@@ -21,9 +21,9 @@ import (
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/klog/v2"
+ // TODO: Switch to k8s.io/utils/clock once it supports AfterFunc()
+ "k8s.io/apimachinery/pkg/util/clock"
)
// WorkArgs keeps arguments that will be passed to the function executed by the worker.
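This file is the one holdout: it schedules delayed work through AfterFunc, which the apimachinery Clock interface provides but k8s.io/utils/clock did not yet at the time of this commit, hence the TODO. A sketch of that dependency (a hypothetical helper, not the file's actual code):

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

// schedule runs f at fireAt, using the injectable clock so tests can fire
// timers synthetically. AfterFunc is what k8s.io/utils/clock was missing.
func schedule(c clock.Clock, fireAt time.Time, f func()) clock.Timer {
	delay := fireAt.Sub(c.Now())
	if delay <= 0 {
		go f()
		return nil
	}
	return c.AfterFunc(delay, f)
}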

pkg/controller/podgc/gc_controller_test.go

@@ -25,7 +25,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
@@ -35,6 +34,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/testutil"
testingclock "k8s.io/utils/clock/testing"
)
func alwaysReady() bool { return true }
@@ -322,7 +322,7 @@ func TestGCOrphaned(t *testing.T) {
podInformer.Informer().GetStore().Add(pod)
}
// Overwrite queue
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
gcc.nodeQueue.ShutDown()
gcc.nodeQueue = workqueue.NewDelayingQueueWithCustomClock(fakeClock, "podgc_test_queue")
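Swapping the queue's clock for a fake one lets the test release delayed items on demand instead of waiting in real time. A sketch of that mechanism (the item and delays are illustrative, not from the diff):

package example

import (
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func demo() {
	fakeClock := testingclock.NewFakeClock(time.Now())
	q := workqueue.NewDelayingQueueWithCustomClock(fakeClock, "podgc_test_queue")

	q.AddAfter("node-1", time.Second) // invisible until the clock advances
	fakeClock.Step(2 * time.Second)   // release the delayed item
	item, _ := q.Get()                // blocks until the item is waiting
	_ = item
	q.ShutDown()
}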

pkg/controller/garbagecollector/graph_builder.go

@@ -26,7 +26,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -37,6 +36,7 @@ import (
"k8s.io/controller-manager/pkg/informerfactory"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
"k8s.io/utils/clock"
)
type eventType int

pkg/controller/testutil/test_utils.go

@@ -32,7 +32,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/watch"
@@ -45,6 +44,8 @@ import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
jsonpatch "github.com/evanphx/json-patch"
)
@@ -455,7 +456,7 @@ func NewFakeRecorder() *FakeRecorder {
return &FakeRecorder{
source: v1.EventSource{Component: "nodeControllerTest"},
Events: []*v1.Event{},
- clock: clock.NewFakeClock(time.Now()),
+ clock: testingclock.NewFakeClock(time.Now()),
}
}

pkg/controller/ttlafterfinished/ttlafterfinished_controller.go

@@ -27,7 +27,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
batchinformers "k8s.io/client-go/informers/batch/v1"
@@ -42,6 +41,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
jobutil "k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/controller/ttlafterfinished/metrics"
"k8s.io/utils/clock"
)
// Controller watches for changes of Jobs API objects. Triggered by Job creation
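For non-test code like this controller, only the import path changes; the pattern of holding the clock interface and defaulting to the real clock is unchanged. A sketch of that consuming pattern (field and method names are illustrative, not the controller's actual ones):

package example

import (
	"time"

	"k8s.io/utils/clock"
)

// Controller-style consumer: depend on the interface, default to RealClock,
// and let tests substitute a fake from k8s.io/utils/clock/testing.
type ttlChecker struct {
	clock clock.Clock
}

func newTTLChecker() *ttlChecker {
	return &ttlChecker{clock: clock.RealClock{}}
}

func (c *ttlChecker) expired(finishedAt time.Time, ttl time.Duration) bool {
	return c.clock.Since(finishedAt) >= ttl
}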