replacing handwritten client in several controllers
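In short, the replication manager, its tests, and its utils stop constructing the hand-written client from pkg/client/unversioned and instead accept the generated release_1_1 clientset, routing calls through its Legacy() group client. A minimal sketch of the new call path (illustrative only; it reuses the identifiers from the import hunk below rather than compiling against any particular tree):

    // listAllRCs mirrors the ListFunc change in this commit: the generated
    // clientset exposes the core API group via Legacy().
    func listAllRCs(kubeClient clientset.Interface) (runtime.Object, error) {
        // previously: kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
        return kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).List(api.ListOptions{})
    }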
@@ -27,8 +27,9 @@ import (
     "github.com/golang/glog"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/client/cache"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
     "k8s.io/kubernetes/pkg/client/record"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/labels"
@@ -61,7 +62,7 @@ const (
 // TODO: this really should be called ReplicationController. The only reason why it's a Manager
 // is to distinguish this type from API object "ReplicationController". We should fix this.
 type ReplicationManager struct {
-    kubeClient client.Interface
+    kubeClient clientset.Interface
     podControl controller.PodControlInterface

     // An rc is temporarily suspended after creating/deleting these many replicas.
@@ -90,10 +91,10 @@ type ReplicationManager struct {
 }

 // NewReplicationManager creates a new ReplicationManager.
-func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
+func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
-    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+    eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})

     rm := &ReplicationManager{
         kubeClient: kubeClient,
@@ -109,10 +110,10 @@ func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.
     rm.rcStore.Store, rm.rcController = framework.NewInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-                return rm.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
+                return rm.kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).List(options)
             },
             WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-                return rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(options)
+                return rm.kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).Watch(options)
             },
         },
         &api.ReplicationController{},
@@ -150,10 +151,10 @@ func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.
     rm.podStore.Store, rm.podController = framework.NewInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-                return rm.kubeClient.Pods(api.NamespaceAll).List(options)
+                return rm.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
             },
             WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-                return rm.kubeClient.Pods(api.NamespaceAll).Watch(options)
+                return rm.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
             },
         },
         &api.Pod{},
@@ -451,7 +452,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
     }

     // Always updates status as pods come up or die.
-    if err := updateReplicaCount(rm.kubeClient.ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
+    if err := updateReplicaCount(rm.kubeClient.Legacy().ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
         // Multiple things could lead to this update failing. Requeuing the controller ensures
         // we retry with some fairness.
         glog.V(2).Infof("Failed to update replica count for controller %v/%v; requeuing; error: %v", rc.Namespace, rc.Name, err)
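The test file below applies the same substitution to the fixtures: the hand-written testclient.Fake becomes the generated fake.Clientset, with reactors registered through the shared testing/core helpers. A short sketch of the new fixture wiring, using only calls that appear in the hunks that follow:

    // hedged sketch of the post-change test setup (names as in the hunks below)
    fakeWatch := watch.NewFake()
    c := &fake.Clientset{}
    c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
    manager.podStoreSynced = alwaysReady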
@@ -29,8 +29,10 @@ import (
     "k8s.io/kubernetes/pkg/api/testapi"
     "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/client/cache"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
+    "k8s.io/kubernetes/pkg/client/testing/core"
+    "k8s.io/kubernetes/pkg/client/testing/fake"
     client "k8s.io/kubernetes/pkg/client/unversioned"
-    "k8s.io/kubernetes/pkg/client/unversioned/testclient"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/securitycontext"
@@ -132,9 +134,9 @@ type serverResponse struct {
 }

 func TestSyncReplicationControllerDoesNothing(t *testing.T) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
     fakePodControl := controller.FakePodControl{}
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady

     // 2 running pods, a controller with 2 replicas, sync is a no-op
@@ -148,9 +150,9 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
 }

 func TestSyncReplicationControllerDeletes(t *testing.T) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
     fakePodControl := controller.FakePodControl{}
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady
     manager.podControl = &fakePodControl

@@ -164,9 +166,9 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
 }

 func TestDeleteFinalStateUnknown(t *testing.T) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
     fakePodControl := controller.FakePodControl{}
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady
     manager.podControl = &fakePodControl

@@ -197,8 +199,8 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 }

 func TestSyncReplicationControllerCreates(t *testing.T) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady

     // A controller with 2 replicas and no pods in the store, 2 creates expected
@@ -220,8 +222,8 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
     testServer := httptest.NewServer(&fakeHandler)
     // TODO: Uncomment when fix #19254
     // defer testServer.Close()
-    client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady

     // Steady state for the replication controller, no Status.Replicas updates expected
@@ -262,9 +264,8 @@ func TestControllerUpdateReplicas(t *testing.T) {
     testServer := httptest.NewServer(&fakeHandler)
     // TODO: Uncomment when fix #19254
     // defer testServer.Close()
-
-    client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady

     // Insufficient number of pods in the system, and Status.Replicas is wrong;
@@ -302,10 +303,9 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
     testServer := httptest.NewServer(&fakeHandler)
     // TODO: Uncomment when fix #19254
     // defer testServer.Close()
-    client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-
+    c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
     fakePodControl := controller.FakePodControl{}
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady
     manager.podControl = &fakePodControl

@@ -351,7 +351,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 }

 func TestPodControllerLookup(t *testing.T) {
-    manager := NewReplicationManager(client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
+    manager := NewReplicationManager(clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady
     testCases := []struct {
         inRCs []*api.ReplicationController
@@ -410,16 +410,11 @@ func TestPodControllerLookup(t *testing.T) {
     }
 }

-type FakeWatcher struct {
-    w *watch.FakeWatcher
-    *testclient.Fake
-}
-
 func TestWatchControllers(t *testing.T) {
     fakeWatch := watch.NewFake()
-    client := &testclient.Fake{}
-    client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    c := &fake.Clientset{}
+    c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady

     var testControllerSpec api.ReplicationController
@@ -460,9 +455,9 @@ func TestWatchControllers(t *testing.T) {

 func TestWatchPods(t *testing.T) {
     fakeWatch := watch.NewFake()
-    client := &testclient.Fake{}
-    client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    c := &fake.Clientset{}
+    c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady

     // Put one rc and one pod into the controller's stores
@@ -504,7 +499,7 @@ func TestWatchPods(t *testing.T) {
 }

 func TestUpdatePods(t *testing.T) {
-    manager := NewReplicationManager(testclient.NewSimpleFake(), controller.NoResyncPeriodFunc, BurstReplicas)
+    manager := NewReplicationManager(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady

     received := make(chan string)
@@ -563,8 +558,8 @@ func TestControllerUpdateRequeue(t *testing.T) {
     // TODO: Uncomment when fix #19254
     // defer testServer.Close()

-    client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.podStoreSynced = alwaysReady

     rc := newReplicationController(1)
@@ -598,31 +593,31 @@ func TestControllerUpdateRequeue(t *testing.T) {

 func TestControllerUpdateStatusWithFailure(t *testing.T) {
     rc := newReplicationController(1)
-    fakeClient := &testclient.Fake{}
-    fakeClient.AddReactor("get", "replicationcontrollers", func(action testclient.Action) (bool, runtime.Object, error) {
+    c := &fake.Clientset{}
+    c.AddReactor("get", "replicationcontrollers", func(action core.Action) (bool, runtime.Object, error) {
         return true, rc, nil
     })
-    fakeClient.AddReactor("*", "*", func(action testclient.Action) (bool, runtime.Object, error) {
+    c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
         return true, &api.ReplicationController{}, fmt.Errorf("Fake error")
     })
-    fakeRCClient := &testclient.FakeReplicationControllers{fakeClient, "default"}
+    fakeRCClient := c.Legacy().ReplicationControllers("default")
     numReplicas := 10
     updateReplicaCount(fakeRCClient, *rc, numReplicas)
     updates, gets := 0, 0
-    for _, a := range fakeClient.Actions() {
+    for _, a := range c.Actions() {
         if a.GetResource() != "replicationcontrollers" {
             t.Errorf("Unexpected action %+v", a)
             continue
         }

         switch action := a.(type) {
-        case testclient.GetAction:
+        case core.GetAction:
             gets++
             // Make sure the get is for the right rc even though the update failed.
             if action.GetName() != rc.Name {
                 t.Errorf("Expected get for rc %v, got %+v instead", rc.Name, action.GetName())
             }
-        case testclient.UpdateAction:
+        case core.UpdateAction:
             updates++
             // Confirm that the update has the right status.Replicas even though the Get
             // returned an rc with replicas=1.
@@ -643,9 +638,9 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 }

 func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
     fakePodControl := controller.FakePodControl{}
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, burstReplicas)
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, burstReplicas)
     manager.podStoreSynced = alwaysReady
     manager.podControl = &fakePodControl

@@ -763,9 +758,9 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
 // TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods
 // and checking expectations.
 func TestRCSyncExpectations(t *testing.T) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
     fakePodControl := controller.FakePodControl{}
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 2)
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 2)
     manager.podStoreSynced = alwaysReady
     manager.podControl = &fakePodControl

@@ -788,8 +783,8 @@ func TestRCSyncExpectations(t *testing.T) {
 }

 func TestDeleteControllerAndExpectations(t *testing.T) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10)
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10)
     manager.podStoreSynced = alwaysReady

     rc := newReplicationController(1)
@@ -830,9 +825,9 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 }

 func TestRCManagerNotReady(t *testing.T) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
     fakePodControl := controller.FakePodControl{}
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 2)
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 2)
     manager.podControl = &fakePodControl
     manager.podStoreSynced = func() bool { return false }

@@ -867,10 +862,10 @@ func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController {
 }

 func TestOverlappingRCs(t *testing.T) {
-    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

     for i := 0; i < 5; i++ {
-        manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10)
+        manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10)
         manager.podStoreSynced = alwaysReady

         // Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
@@ -910,8 +905,8 @@ func TestRCManagerInit(t *testing.T) {
     // TODO: Uncomment when fix #19254
     // defer testServer.Close()

-    client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+    c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+    manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
     manager.rcStore.Store.Add(rc)
     manager.podStoreSynced = alwaysReady
     controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)
@@ -21,11 +21,11 @@ package replication
 import (
     "github.com/golang/glog"
     "k8s.io/kubernetes/pkg/api"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 )

 // updateReplicaCount attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry.
-func updateReplicaCount(rcClient client.ReplicationControllerInterface, controller api.ReplicationController, numReplicas int) (updateErr error) {
+func updateReplicaCount(rcClient unversioned_legacy.ReplicationControllerInterface, controller api.ReplicationController, numReplicas int) (updateErr error) {
     // This is the steady state. It happens when the rc doesn't have any expectations, since
     // we do a periodic relist every 30s. If the generations differ but the replicas are
     // the same, a caller might've resized to the same replica count.
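For the utils file, the net effect is that updateReplicaCount now takes the generated legacy ReplicationControllerInterface, so callers hand it the typed client obtained from the clientset; a sketch of the call site, mirroring the syncReplicationController hunk above:

    // status update through the generated legacy client (as in the controller hunk above)
    if err := updateReplicaCount(rm.kubeClient.Legacy().ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
        // requeue on failure, as before
    }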