@@ -39,8 +39,8 @@ import (
 // NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
 func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *util.FakeClock) {
 	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
-	fakeClock := &util.FakeClock{fakeTime}
-	ttlPolicy := &cache.TTLPolicy{ttl, fakeClock}
+	fakeClock := &util.FakeClock{Time: fakeTime}
+	ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
 	ttlStore := cache.NewFakeExpirationStore(
 		ExpKeyFunc, nil, ttlPolicy, fakeClock)
 	return &ControllerExpectations{ttlStore}, fakeClock
@@ -52,40 +52,40 @@ type nnu struct {
 // Add adds an object to the set and sends an add event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) Add(obj runtime.Object) {
-	f.Change(watch.Event{watch.Added, obj}, 1)
+	f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
 }
 
 // Modify updates an object in the set and sends a modified event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) Modify(obj runtime.Object) {
-	f.Change(watch.Event{watch.Modified, obj}, 1)
+	f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
 }
 
 // Delete deletes an object from the set and sends a delete event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
-	f.Change(watch.Event{watch.Deleted, lastValue}, 1)
+	f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
 }
 
 // AddDropWatch adds an object to the set but forgets to send an add event to
 // watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
-	f.Change(watch.Event{watch.Added, obj}, 0)
+	f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
 }
 
 // ModifyDropWatch updates an object in the set but forgets to send a modify
 // event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
-	f.Change(watch.Event{watch.Modified, obj}, 0)
+	f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
 }
 
 // DeleteDropWatch deletes an object from the set but forgets to send a delete
 // event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
-	f.Change(watch.Event{watch.Deleted, lastValue}, 0)
+	f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
 }
 
 func (f *FakeControllerSource) key(meta *api.ObjectMeta) nnu {
@@ -170,7 +170,7 @@ func (f *FakeControllerSource) Watch(resourceVersion string) (watch.Interface, e
 			if err != nil {
 				return nil, err
 			}
-			changes = append(changes, watch.Event{c.Type, objCopy.(runtime.Object)})
+			changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
 		}
 		return f.broadcaster.WatchWithPrefix(changes), nil
 	} else if rc > len(f.changes) {
@@ -479,7 +479,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
 					Status: api.ConditionUnknown,
 					Reason: fmt.Sprintf("Kubelet stopped posting node status."),
 					LastHeartbeatTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
-					LastTransitionTime: util.Time{util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
+					LastTransitionTime: util.Time{Time: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
 				},
 			},
 			Capacity: api.ResourceList{
@@ -234,7 +234,7 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 	// volumes are removed by processes external to this binder and must be removed from the cluster
 	case api.VolumeFailed:
 		if volume.Spec.ClaimRef == nil {
-			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume)
+			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
 		} else {
 			glog.V(5).Infof("PersistentVolume[%s] previously failed recycling. Skipping.\n", volume.Name)
 		}
@@ -183,7 +183,7 @@ func TestBindingWithExamples(t *testing.T) {
 	pv, err := client.PersistentVolumes().Get("any")
 	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle
 	if err != nil {
-		t.Error("Unexpected error getting PV from client: %v", err)
+		t.Errorf("Unexpected error getting PV from client: %v", err)
 	}
 
 	claim, error := client.PersistentVolumeClaims("ns").Get("any")
@@ -285,7 +285,7 @@ func TestMissingFromIndex(t *testing.T) {
 
 	pv, err := client.PersistentVolumes().Get("any")
 	if err != nil {
-		t.Error("Unexpected error getting PV from client: %v", err)
+		t.Errorf("Unexpected error getting PV from client: %v", err)
 	}
 
 	claim, error := client.PersistentVolumeClaims("ns").Get("any")
@@ -200,7 +200,7 @@ func TestSort(t *testing.T) {
 
 	for i, expected := range []string{"gce-pd-1", "gce-pd-5", "gce-pd-10"} {
 		if string(volumes[i].UID) != expected {
-			t.Error("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
+			t.Errorf("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
 		}
 	}
 
@@ -211,7 +211,7 @@ func TestSort(t *testing.T) {
 
 	for i, expected := range []string{"nfs-1", "nfs-5", "nfs-10"} {
 		if string(volumes[i].UID) != expected {
-			t.Error("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
+			t.Errorf("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
 		}
 	}
 }
@@ -135,7 +135,7 @@ func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume
 	// blocks until completion
 	err = volRecycler.Recycle()
 	if err != nil {
-		glog.Errorf("PersistentVolume[%s] failed recycling: %+v", err)
+		glog.Errorf("PersistentVolume[%s] failed recycling: %+v", pv.Name, err)
 		pv.Status.Message = fmt.Sprintf("Recycling error: %s", err)
 		nextPhase = api.VolumeFailed
 	} else {
@@ -177,7 +177,7 @@ func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *Repl
 func (rm *ReplicationManager) SetEventRecorder(recorder record.EventRecorder) {
 	// TODO: Hack. We can't cleanly shutdown the event recorder, so benchmarks
 	// need to pass in a fake.
-	rm.podControl = controller.RealPodControl{rm.kubeClient, recorder}
+	rm.podControl = controller.RealPodControl{KubeClient: rm.kubeClient, Recorder: recorder}
 }
 
 // Run begins watching and syncing.
@@ -689,7 +689,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 			return &api.ReplicationController{}, fmt.Errorf("Fake error")
 		},
 	}
-	fakeRCClient := &testclient.FakeReplicationControllers{fakeClient, "default"}
+	fakeRCClient := &testclient.FakeReplicationControllers{Fake: fakeClient, Namespace: "default"}
 	numReplicas := 10
 	updateReplicaCount(fakeRCClient, *rc, numReplicas)
 	updates, gets := 0, 0
@@ -54,8 +54,6 @@ func updateReplicaCount(rcClient client.ReplicationControllerInterface, controll
 			return getErr
 		}
 	}
-	// Failed 2 updates one of which was with the latest controller, return the update error
-	return
 }
 
 // OverlappingControllers sorts a list of controllers by creation timestamp, using their names as a tie breaker.
@@ -213,7 +213,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
 		}
 		service = cachedService.lastState
 		delta.Object = cachedService.lastState
-		namespacedName = types.NamespacedName{service.Namespace, service.Name}
+		namespacedName = types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
 	} else {
 		namespacedName.Namespace = service.Namespace
 		namespacedName.Name = service.Name
@@ -659,7 +659,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
 
 	// It's only an actual error if the load balancer still exists.
 	if _, exists, err := s.balancer.GetTCPLoadBalancer(name, s.zone.Region); err != nil {
-		glog.Errorf("External error while checking if TCP load balancer %q exists: name, %v")
+		glog.Errorf("External error while checking if TCP load balancer %q exists: name, %v", name, err)
 	} else if !exists {
 		return nil
 	}
@@ -95,7 +95,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
 		controller.init()
 		cloud.Calls = nil     // ignore any cloud calls made in init()
 		client.ClearActions() // ignore any client calls made in init()
-		err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{"foo", "bar"}, item.service, nil)
+		err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{Namespace: "foo", Name: "bar"}, item.service, nil)
		if !item.expectErr && err != nil {
 			t.Errorf("unexpected error: %v", err)
 		} else if item.expectErr && err == nil {
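
Note: every hunk above applies one of two `go vet` cleanups, the "composites" check (unkeyed struct literals gain explicit field names) and the "printf" check (format-string calls gain their missing arguments, or switch from t.Error to t.Errorf). The sketch below is a minimal illustration of both patterns; the Event type and helper functions are hypothetical stand-ins, not code from this change.

package example

import (
	"fmt"
	"testing"
)

// Event is a hypothetical stand-in for the struct types touched above,
// such as watch.Event or types.NamespacedName.
type Event struct {
	Type   string
	Object interface{}
}

// keyedLiteral shows the "composites" fix: vet flags Event{"ADDED", obj}
// because an unkeyed literal silently changes meaning if the struct's
// fields are ever reordered; naming each field is the form adopted above.
func keyedLiteral(obj interface{}) Event {
	return Event{Type: "ADDED", Object: obj}
}

// printfArgs shows the "printf" fix: each verb in a format string needs a
// matching operand, and t.Error (which formats like Println, not Printf)
// must become t.Errorf when a format string is used.
func printfArgs(t *testing.T, name string, err error) {
	if err != nil {
		t.Errorf("unexpected error for %s: %v", name, err)
	}
	fmt.Printf("checked %s\n", name)
}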