Merge pull request #7004 from krousey/scheduler_pointer_fix
Change a few uses of api.Pod to *api.Pod to avoid copying
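The motivation in one sentence: api.Pod is a large struct, so every append or pass-by-value of a Pod copies the whole thing, while a []*api.Pod only copies pointers. A minimal, self-contained sketch of the two styles (it uses a small stand-in Pod struct, not the real api.Pod):

package main

import "fmt"

// Pod stands in for api.Pod; the real struct is far larger, which is what
// makes the per-element value copies expensive.
type Pod struct {
	Name   string
	Labels map[string]string
}

// collectByValue copies each Pod struct into the result slice (the old style).
func collectByValue(src []*Pod) []Pod {
	var out []Pod
	for _, p := range src {
		out = append(out, *p) // copies the whole struct every time
	}
	return out
}

// collectByPointer copies only pointers (the style this commit moves to).
func collectByPointer(src []*Pod) []*Pod {
	var out []*Pod
	for _, p := range src {
		out = append(out, p) // copies one pointer
	}
	return out
}

func main() {
	src := []*Pod{{Name: "a"}, {Name: "b"}}
	fmt.Println(len(collectByValue(src)), len(collectByPointer(src)))
}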
pkg/client/cache/listers.go
@@ -42,11 +42,11 @@ type StoreToPodLister struct {
 // TODO Get rid of the selector because that is confusing because the user might not realize that there has already been
 // some selection at the caching stage. Also, consistency will facilitate code generation. However, the pkg/client
 // is inconsistent too.
-func (s *StoreToPodLister) List(selector labels.Selector) (pods []api.Pod, err error) {
+func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err error) {
 for _, m := range s.Store.List() {
 pod := m.(*api.Pod)
 if selector.Matches(labels.Set(pod.Labels)) {
-pods = append(pods, *pod)
+pods = append(pods, pod)
 }
 }
 return pods, nil
@@ -106,7 +106,7 @@ func (s *StoreToServiceLister) List() (services api.ServiceList, err error) {
 
 // TODO: Move this back to scheduler as a helper function that takes a Store,
 // rather than a method of StoreToServiceLister.
-func (s *StoreToServiceLister) GetPodServices(pod api.Pod) (services []api.Service, err error) {
+func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) {
 var selector labels.Selector
 var service api.Service
 
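One consequence of StoreToPodLister.List returning []*api.Pod (an observation about the pattern, not something the patch states): callers now receive pointers to the objects held in the store, so a caller that mutates a returned pod is mutating the cache. A stand-in sketch of that aliasing:

package main

import "fmt"

type Pod struct{ Name string }

// store mimics a cache whose List hands out pointers to its own objects.
type store struct{ pods []*Pod }

func (s *store) List() []*Pod { return s.pods }

func main() {
	s := &store{pods: []*Pod{{Name: "foo"}}}

	a := s.List()
	a[0].Name = "renamed" // mutates the cached object, not a copy

	fmt.Println(s.List()[0].Name) // prints "renamed"
}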
@@ -34,9 +34,9 @@ func NewSourceApiserver(c *client.Client, hostname string, updates chan<- interf
 // newSourceApiserverFromLW holds creates a config source that watches and pulls from the apiserver.
 func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
 send := func(objs []interface{}) {
-var pods []api.Pod
+var pods []*api.Pod
 for _, o := range objs {
-pods = append(pods, *o.(*api.Pod))
+pods = append(pods, o.(*api.Pod))
 }
 updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.ApiserverSource}
 }
@@ -42,20 +42,20 @@ func (lw fakePodLW) Watch(resourceVersion string) (watch.Interface, error) {
 var _ cache.ListerWatcher = fakePodLW{}
 
 func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
-pod1v1 := api.Pod{
+pod1v1 := &api.Pod{
 ObjectMeta: api.ObjectMeta{Name: "p"},
 Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}}
-pod1v2 := api.Pod{
+pod1v2 := &api.Pod{
 ObjectMeta: api.ObjectMeta{Name: "p"},
 Spec: api.PodSpec{Containers: []api.Container{{Image: "image/two"}}}}
-pod2 := api.Pod{
+pod2 := &api.Pod{
 ObjectMeta: api.ObjectMeta{Name: "q"},
 Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}}
 
 // Setup fake api client.
 fakeWatch := watch.NewFake()
 lw := fakePodLW{
-listResp: &api.PodList{Items: []api.Pod{pod1v1}},
+listResp: &api.PodList{Items: []api.Pod{*pod1v1}},
 watchResp: fakeWatch,
 }
 
@@ -74,7 +74,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 }
 
 // Add another pod
-fakeWatch.Add(&pod2)
+fakeWatch.Add(pod2)
 got, ok = <-ch
 if !ok {
 t.Errorf("Unable to read from channel when expected")
@@ -89,7 +89,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 }
 
 // Modify pod1
-fakeWatch.Modify(&pod1v2)
+fakeWatch.Modify(pod1v2)
 got, ok = <-ch
 if !ok {
 t.Errorf("Unable to read from channel when expected")
@@ -103,7 +103,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 }
 
 // Delete pod1
-fakeWatch.Delete(&pod1v2)
+fakeWatch.Delete(pod1v2)
 got, ok = <-ch
 if !ok {
 t.Errorf("Unable to read from channel when expected")
@@ -115,7 +115,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 }
 
 // Delete pod2
-fakeWatch.Delete(&pod2)
+fakeWatch.Delete(pod2)
 got, ok = <-ch
 if !ok {
 t.Errorf("Unable to read from channel when expected")
@@ -85,7 +85,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, hostname string) er
 
 type defaultFunc func(pod *api.Pod) error
 
-func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod api.Pod, err error) {
+func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *api.Pod, err error) {
 obj, err := api.Scheme.Decode(data)
 if err != nil {
 return false, pod, err
@@ -104,7 +104,7 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod ap
 err = fmt.Errorf("invalid pod: %v", errs)
 return true, pod, err
 }
-return true, *newPod, nil
+return true, newPod, nil
 }
 
 func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods api.PodList, err error) {
@@ -132,7 +132,7 @@ func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods api
 return true, *newPods, err
 }
 
-func tryDecodeSingleManifest(data []byte, defaultFn defaultFunc) (parsed bool, manifest v1beta1.ContainerManifest, pod api.Pod, err error) {
+func tryDecodeSingleManifest(data []byte, defaultFn defaultFunc) (parsed bool, manifest v1beta1.ContainerManifest, pod *api.Pod, err error) {
 // TODO: should be api.Scheme.Decode
 // This is awful. DecodeInto() expects to find an APIObject, which
 // Manifest is not. We keep reading manifest for now for compat, but
@@ -144,6 +144,7 @@ func tryDecodeSingleManifest(data []byte, defaultFn defaultFunc) (parsed bool, m
 // avoids writing a v1beta1.ContainerManifest -> api.Pod
 // conversion which would be identical to the api.ContainerManifest ->
 // api.Pod conversion.
+pod = new(api.Pod)
 if err = yaml.Unmarshal(data, &manifest); err != nil {
 return false, manifest, pod, err
 }
@@ -155,10 +156,10 @@ func tryDecodeSingleManifest(data []byte, defaultFn defaultFunc) (parsed bool, m
 err = fmt.Errorf("invalid manifest: %v", errs)
 return false, manifest, pod, err
 }
-if err = api.Scheme.Convert(&newManifest, &pod); err != nil {
+if err = api.Scheme.Convert(&newManifest, pod); err != nil {
 return true, manifest, pod, err
 }
-if err := defaultFn(&pod); err != nil {
+if err := defaultFn(pod); err != nil {
 return true, manifest, pod, err
 }
 // Success.
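tryDecodeSingleManifest gains a `pod = new(api.Pod)` line because the named return value is now a pointer: the later Convert and defaultFn calls need a non-nil target to write into. A stand-in sketch of that allocate-then-fill shape (convert here is a simplified placeholder, not the real api.Scheme.Convert):

package main

import "fmt"

type Pod struct{ Name string }

// convert stands in for a scheme conversion: it fills in an already
// allocated destination through the pointer it is given.
func convert(src string, dst *Pod) error {
	dst.Name = src
	return nil
}

func decode(data string) (pod *Pod, err error) {
	pod = new(Pod) // allocate before handing the pointer to convert
	if err = convert(data, pod); err != nil {
		return pod, err
	}
	return pod, nil
}

func main() {
	p, _ := decode("example")
	fmt.Println(p.Name)
}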
@@ -168,12 +168,12 @@ func (s *podStorage) Merge(source string, change interface{}) error {
 s.updates <- *updates
 }
 if len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
-s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, source}
+s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, source}
 }
 
 case PodConfigNotificationSnapshot:
 if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
-s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, source}
+s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, source}
 }
 
 default:
@@ -212,7 +212,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 if !reflect.DeepEqual(existing.Spec, ref.Spec) {
 // this is an update
 existing.Spec = ref.Spec
-updates.Pods = append(updates.Pods, *existing)
+updates.Pods = append(updates.Pods, existing)
 continue
 }
 // this is a no-op
@@ -224,17 +224,17 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 }
 ref.Annotations[kubelet.ConfigSourceAnnotationKey] = source
 pods[name] = ref
-adds.Pods = append(adds.Pods, *ref)
+adds.Pods = append(adds.Pods, ref)
 }
 
 case kubelet.REMOVE:
 glog.V(4).Infof("Removing a pod %v", update)
 for _, value := range update.Pods {
-name := kubecontainer.GetPodFullName(&value)
+name := kubecontainer.GetPodFullName(value)
 if existing, found := pods[name]; found {
 // this is a delete
 delete(pods, name)
-deletes.Pods = append(deletes.Pods, *existing)
+deletes.Pods = append(deletes.Pods, existing)
 continue
 }
 // this is a no-op
@@ -255,7 +255,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 if !reflect.DeepEqual(existing.Spec, ref.Spec) {
 // this is an update
 existing.Spec = ref.Spec
-updates.Pods = append(updates.Pods, *existing)
+updates.Pods = append(updates.Pods, existing)
 continue
 }
 // this is a no-op
@@ -266,13 +266,13 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 }
 ref.Annotations[kubelet.ConfigSourceAnnotationKey] = source
 pods[name] = ref
-adds.Pods = append(adds.Pods, *ref)
+adds.Pods = append(adds.Pods, ref)
 }
 
 for name, existing := range oldPods {
 if _, found := pods[name]; !found {
 // this is a delete
-deletes.Pods = append(deletes.Pods, *existing)
+deletes.Pods = append(deletes.Pods, existing)
 }
 }
 
@@ -297,10 +297,9 @@ func (s *podStorage) seenSources(sources ...string) bool {
 return s.sourcesSeen.HasAll(sources...)
 }
 
-func filterInvalidPods(pods []api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
+func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
 names := util.StringSet{}
-for i := range pods {
-pod := &pods[i]
+for i, pod := range pods {
 var errlist []error
 if errs := validation.ValidatePod(pod); len(errs) != 0 {
 errlist = append(errlist, errs...)
@@ -330,21 +329,21 @@ func filterInvalidPods(pods []api.Pod, source string, recorder record.EventRecor
 func (s *podStorage) Sync() {
 s.updateLock.Lock()
 defer s.updateLock.Unlock()
-s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, kubelet.AllSource}
+s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, kubelet.AllSource}
 }
 
 // Object implements config.Accessor
 func (s *podStorage) MergedState() interface{} {
 s.podLock.RLock()
 defer s.podLock.RUnlock()
-pods := make([]api.Pod, 0)
+pods := make([]*api.Pod, 0)
 for _, sourcePods := range s.pods {
 for _, podRef := range sourcePods {
 pod, err := api.Scheme.Copy(podRef)
 if err != nil {
 glog.Errorf("unable to copy pod: %v", err)
 }
-pods = append(pods, *pod.(*api.Pod))
+pods = append(pods, pod.(*api.Pod))
 }
 }
 return pods
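The filterInvalidPods rewrite above is the recurring loop pattern in this change: with a []api.Pod the code had to index the slice to get a usable address (taking the address of the range variable would have aliased one reused variable in the Go of that era), while with a []*api.Pod the range variable already is the stable pointer. A stand-in comparison of the two loops:

package main

import "fmt"

type Pod struct{ Name string }

func main() {
	values := []Pod{{"a"}, {"b"}}
	pointers := []*Pod{{"a"}, {"b"}}

	// With a value slice, index to take the address of the slice element
	// itself; that pointer stays valid after the loop ends.
	var fromValues []*Pod
	for i := range values {
		fromValues = append(fromValues, &values[i])
	}

	// With a pointer slice, the element itself is already the pointer.
	var fromPointers []*Pod
	for _, p := range pointers {
		fromPointers = append(fromPointers, p)
	}

	fmt.Println(fromValues[0].Name, fromPointers[1].Name)
}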
@@ -39,7 +39,7 @@ func expectEmptyChannel(t *testing.T, ch <-chan interface{}) {
 }
 }
 
-type sortedPods []api.Pod
+type sortedPods []*api.Pod
 
 func (s sortedPods) Len() int {
 return len(s)
@@ -51,8 +51,8 @@ func (s sortedPods) Less(i, j int) bool {
 return s[i].Namespace < s[j].Namespace
 }
 
-func CreateValidPod(name, namespace, source string) api.Pod {
-return api.Pod{
+func CreateValidPod(name, namespace, source string) *api.Pod {
+return &api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: types.UID(name), // for the purpose of testing, this is unique enough
 Name: name,
@@ -67,12 +67,8 @@ func CreateValidPod(name, namespace, source string) api.Pod {
 }
 }
 
-func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...api.Pod) kubelet.PodUpdate {
-newPods := make([]api.Pod, len(pods))
-for i := range pods {
-newPods[i] = pods[i]
-}
-return kubelet.PodUpdate{newPods, op, source}
+func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...*api.Pod) kubelet.PodUpdate {
+return kubelet.PodUpdate{Pods: pods, Op: op, Source: source}
 }
 
 func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) {
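CreatePodUpdate collapses to a single line because a variadic ...*api.Pod parameter already arrives as a []*api.Pod and can be stored directly, with no copy loop. A stand-in illustration, including how an existing slice is forwarded with the ... spread:

package main

import "fmt"

type Pod struct{ Name string }

type PodUpdate struct {
	Pods   []*Pod
	Source string
}

// createPodUpdate mirrors the new shape: the variadic argument is already a
// []*Pod, so it is stored as-is.
func createPodUpdate(source string, pods ...*Pod) PodUpdate {
	return PodUpdate{Pods: pods, Source: source}
}

func main() {
	u := createPodUpdate("file", &Pod{Name: "a"}, &Pod{Name: "b"})

	existing := []*Pod{{Name: "c"}}
	v := createPodUpdate("http", existing...) // forward a slice with ...

	fmt.Println(len(u.Pods), len(v.Pods))
}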
@@ -162,7 +158,7 @@ func TestInvalidPodFiltered(t *testing.T) {
 expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test")))
 
 // add an invalid update
-podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
+podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
 channel <- podUpdate
 expectNoPodUpdate(t, ch)
 }
@@ -179,10 +175,10 @@ func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
 expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test")))
 
 // container updates are separated as UPDATE
-pod := podUpdate.Pods[0]
+pod := *podUpdate.Pods[0]
 pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
-channel <- CreatePodUpdate(kubelet.ADD, NoneSource, pod)
-expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))
+channel <- CreatePodUpdate(kubelet.ADD, NoneSource, &pod)
+expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, &pod))
 }
 
 func TestNewPodAddedSnapshot(t *testing.T) {
@@ -197,10 +193,10 @@ func TestNewPodAddedSnapshot(t *testing.T) {
 expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test")))
 
 // container updates are separated as UPDATE
-pod := podUpdate.Pods[0]
+pod := *podUpdate.Pods[0]
 pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
-channel <- CreatePodUpdate(kubelet.ADD, NoneSource, pod)
-expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, pod))
+channel <- CreatePodUpdate(kubelet.ADD, NoneSource, &pod)
+expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, &pod))
 }
 
 func TestNewPodAddedUpdatedRemoved(t *testing.T) {
@@ -221,7 +217,7 @@ func TestNewPodAddedUpdatedRemoved(t *testing.T) {
 channel <- podUpdate
 expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))
 
-podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
+podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
 channel <- podUpdate
 expectPodUpdate(t, ch, CreatePodUpdate(kubelet.REMOVE, NoneSource, pod))
 }
@@ -66,7 +66,7 @@ func (s *sourceFile) extractFromPath() error {
 return err
 }
 // Emit an update with an empty PodList to allow FileSource to be marked as seen
-s.updates <- kubelet.PodUpdate{[]api.Pod{}, kubelet.SET, kubelet.FileSource}
+s.updates <- kubelet.PodUpdate{[]*api.Pod{}, kubelet.SET, kubelet.FileSource}
 return fmt.Errorf("path does not exist, ignoring")
 }
 
@@ -83,7 +83,7 @@ func (s *sourceFile) extractFromPath() error {
 if err != nil {
 return err
 }
-s.updates <- kubelet.PodUpdate{[]api.Pod{pod}, kubelet.SET, kubelet.FileSource}
+s.updates <- kubelet.PodUpdate{[]*api.Pod{pod}, kubelet.SET, kubelet.FileSource}
 
 default:
 return fmt.Errorf("path is not a directory or file")
@@ -95,13 +95,13 @@ func (s *sourceFile) extractFromPath() error {
 // Get as many pod configs as we can from a directory. Return an error iff something
 // prevented us from reading anything at all. Do not return an error if only some files
 // were problematic.
-func (s *sourceFile) extractFromDir(name string) ([]api.Pod, error) {
+func (s *sourceFile) extractFromDir(name string) ([]*api.Pod, error) {
 dirents, err := filepath.Glob(filepath.Join(name, "[^.]*"))
 if err != nil {
 return nil, fmt.Errorf("glob failed: %v", err)
 }
 
-pods := make([]api.Pod, 0)
+pods := make([]*api.Pod, 0)
 if len(dirents) == 0 {
 return pods, nil
 }
@@ -131,7 +131,7 @@ func (s *sourceFile) extractFromDir(name string) ([]api.Pod, error) {
 return pods, nil
 }
 
-func (s *sourceFile) extractFromFile(filename string) (pod api.Pod, err error) {
+func (s *sourceFile) extractFromFile(filename string) (pod *api.Pod, err error) {
 glog.V(3).Infof("Reading config file %q", filename)
 file, err := os.Open(filename)
 if err != nil {
@@ -83,7 +83,7 @@ func TestReadFromFile(t *testing.T) {
 "id": "test",
 "containers": [{ "name": "image", "image": "test/image", "imagePullPolicy": "PullAlways"}]
 }`,
-expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
+expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "test-" + hostname,
 UID: "12345",
@@ -109,7 +109,7 @@ func TestReadFromFile(t *testing.T) {
 "uuid": "12345",
 "containers": [{ "name": "image", "image": "test/image", "imagePullPolicy": "PullAlways"}]
 }`,
-expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
+expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "12345-" + hostname,
 UID: "12345",
@@ -136,7 +136,7 @@ func TestReadFromFile(t *testing.T) {
 "id": "test",
 "containers": [{ "name": "image", "image": "test/image", "imagePullPolicy": "PullAlways"}]
 }`,
-expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
+expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "test-" + hostname,
 UID: "12345",
@@ -169,7 +169,7 @@ func TestReadFromFile(t *testing.T) {
 }
 }
 }`,
-expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
+expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "test-" + hostname,
 UID: "12345",
@@ -200,7 +200,7 @@ func TestReadFromFile(t *testing.T) {
 }
 }
 }`,
-expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
+expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "12345-" + hostname,
 UID: "12345",
@@ -232,7 +232,7 @@ func TestReadFromFile(t *testing.T) {
 "containers": [{ "name": "image", "image": "test/image" }]
 }
 }`,
-expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
+expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "test-" + hostname,
 UID: "12345",
@@ -264,7 +264,7 @@ func TestReadFromFile(t *testing.T) {
 case got := <-ch:
 update := got.(kubelet.PodUpdate)
 for _, pod := range update.Pods {
-if errs := validation.ValidatePod(&pod); len(errs) > 0 {
+if errs := validation.ValidatePod(pod); len(errs) > 0 {
 t.Errorf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs)
 }
 }
@@ -335,7 +335,7 @@ func TestExtractFromEmptyDir(t *testing.T) {
 }
 }
 
-func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.Pod) {
+func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, *api.Pod) {
 hostname := "an-example-host"
 
 manifest := v1beta1.ContainerManifest{
@@ -358,7 +358,7 @@ func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.Pod) {
 },
 },
 }
-expectedPod := api.Pod{
+expectedPod := &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: id + "-" + hostname,
 UID: types.UID(id),
@@ -391,7 +391,7 @@ func TestExtractFromDir(t *testing.T) {
 manifest2, expectedPod2 := ExampleManifestAndPod("2")
 
 manifests := []v1beta1.ContainerManifest{manifest, manifest2}
-pods := []api.Pod{expectedPod, expectedPod2}
+pods := []*api.Pod{expectedPod, expectedPod2}
 files := make([]*os.File, len(manifests))
 
 dirName, err := ioutil.TempDir("", "foo")
@@ -433,9 +433,9 @@ func TestExtractFromDir(t *testing.T) {
 if !api.Semantic.DeepDerivative(expected, update) {
 t.Fatalf("Expected %#v, Got %#v", expected, update)
 }
-for i := range update.Pods {
-if errs := validation.ValidatePod(&update.Pods[i]); len(errs) != 0 {
-t.Errorf("Expected no validation errors on %#v, Got %q", update.Pods[i], errs)
+for _, pod := range update.Pods {
+if errs := validation.ValidatePod(pod); len(errs) != 0 {
+t.Errorf("Expected no validation errors on %#v, Got %q", pod, errs)
 }
 }
 }
@@ -74,7 +74,7 @@ func (s *sourceURL) extractFromURL() error {
 }
 if len(data) == 0 {
 // Emit an update with an empty PodList to allow HTTPSource to be marked as seen
-s.updates <- kubelet.PodUpdate{[]api.Pod{}, kubelet.SET, kubelet.HTTPSource}
+s.updates <- kubelet.PodUpdate{[]*api.Pod{}, kubelet.SET, kubelet.HTTPSource}
 return fmt.Errorf("zero-length data received from %v", s.url)
 }
 // Short circuit if the manifest has not changed since the last time it was read.
@@ -91,12 +91,12 @@ func (s *sourceURL) extractFromURL() error {
 return singleErr
 }
 // It parsed!
-s.updates <- kubelet.PodUpdate{[]api.Pod{pod}, kubelet.SET, kubelet.HTTPSource}
+s.updates <- kubelet.PodUpdate{[]*api.Pod{pod}, kubelet.SET, kubelet.HTTPSource}
 return nil
 }
 
 // That didn't work, so try an array of manifests.
-parsed, manifests, pods, multiErr := tryDecodeManifestList(data, s.applyDefaults)
+parsed, manifests, podList, multiErr := tryDecodeManifestList(data, s.applyDefaults)
 if parsed {
 if multiErr != nil {
 // It parsed but could not be used.
@@ -110,7 +110,11 @@ func (s *sourceURL) extractFromURL() error {
 return singleErr
 }
 // It parsed!
-s.updates <- kubelet.PodUpdate{pods.Items, kubelet.SET, kubelet.HTTPSource}
+pods := make([]*api.Pod, 0)
+for i := range podList.Items {
+pods = append(pods, &podList.Items[i])
+}
+s.updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.HTTPSource}
 return nil
 }
 
@@ -124,18 +128,22 @@ func (s *sourceURL) extractFromURL() error {
 // It parsed but could not be used.
 return singlePodErr
 }
-s.updates <- kubelet.PodUpdate{[]api.Pod{pod}, kubelet.SET, kubelet.HTTPSource}
+s.updates <- kubelet.PodUpdate{[]*api.Pod{pod}, kubelet.SET, kubelet.HTTPSource}
 return nil
 }
 
 // That didn't work, so try a list of pods.
-parsed, pods, multiPodErr := tryDecodePodList(data, s.applyDefaults)
+parsed, podList, multiPodErr := tryDecodePodList(data, s.applyDefaults)
 if parsed {
 if multiPodErr != nil {
 // It parsed but could not be used.
 return multiPodErr
 }
-s.updates <- kubelet.PodUpdate{pods.Items, kubelet.SET, kubelet.HTTPSource}
+pods := make([]*api.Pod, 0)
+for i := range podList.Items {
+pods = append(pods, &podList.Items[i])
+}
+s.updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.HTTPSource}
 return nil
 }
 
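Both extractFromURL hunks above convert a decoded list whose Items field is a value slice into a []*api.Pod by taking the address of each element in place: &podList.Items[i] points into the backing array, so no per-pod copy is made. A stand-in version of that conversion:

package main

import "fmt"

type Pod struct{ Name string }

type PodList struct{ Items []Pod }

// toPointers converts a value slice into a pointer slice without copying
// the structs: each pointer refers to an element of list.Items itself.
func toPointers(list *PodList) []*Pod {
	pods := make([]*Pod, 0, len(list.Items))
	for i := range list.Items {
		pods = append(pods, &list.Items[i])
	}
	return pods
}

func main() {
	list := &PodList{Items: []Pod{{"a"}, {"b"}}}
	pods := toPointers(list)
	pods[0].Name = "patched"
	fmt.Println(list.Items[0].Name) // "patched": same underlying object
}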
@@ -130,7 +130,7 @@ func TestExtractManifestFromHTTP(t *testing.T) {
 Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}},
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
@@ -155,7 +155,7 @@ func TestExtractManifestFromHTTP(t *testing.T) {
 Containers: []v1beta1.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}},
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "111" + "-" + hostname,
@@ -180,7 +180,7 @@ func TestExtractManifestFromHTTP(t *testing.T) {
 Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}},
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
@@ -209,7 +209,7 @@ func TestExtractManifestFromHTTP(t *testing.T) {
 },
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
@@ -227,7 +227,7 @@ func TestExtractManifestFromHTTP(t *testing.T) {
 ImagePullPolicy: "Always"}},
 },
 },
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "222",
 Name: "bar" + "-" + hostname,
@@ -283,9 +283,9 @@ func TestExtractManifestFromHTTP(t *testing.T) {
 if !api.Semantic.DeepEqual(testCase.expected, update) {
 t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update)
 }
-for i := range update.Pods {
-if errs := validation.ValidatePod(&update.Pods[i]); len(errs) != 0 {
-t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, update.Pods[i], errors.NewAggregate(errs))
+for _, pod := range update.Pods {
+if errs := validation.ValidatePod(pod); len(errs) != 0 {
+t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, pod, errors.NewAggregate(errs))
 }
 }
 }
@@ -317,7 +317,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 },
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
@@ -355,7 +355,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 },
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
@@ -406,7 +406,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 },
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
@@ -424,7 +424,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 ImagePullPolicy: "Always"}},
 },
 },
-api.Pod{
+&api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "222",
 Name: "bar" + "-" + hostname,
@@ -472,9 +472,9 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 if !api.Semantic.DeepEqual(testCase.expected, update) {
 t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update)
 }
-for i := range update.Pods {
-if errs := validation.ValidatePod(&update.Pods[i]); len(errs) != 0 {
-t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, update.Pods[i], errors.NewAggregate(errs))
+for _, pod := range update.Pods {
+if errs := validation.ValidatePod(pod); len(errs) != 0 {
+t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, pod, errors.NewAggregate(errs))
 }
 }
 }
@@ -91,7 +91,7 @@ type SyncHandler interface {
 // Syncs current state to match the specified pods. SyncPodType specified what
 // type of sync is occuring per pod. StartTime specifies the time at which
 // syncing began (for use in monitoring).
-SyncPods(pods []api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, mirrorPods map[string]api.Pod,
+SyncPods(pods []*api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, mirrorPods map[string]api.Pod,
 startTime time.Time) error
 }
 
@@ -1319,7 +1319,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 
 // Stores all volumes defined by the set of pods into a map.
 // Keys for each entry are in the format (POD_ID)/(VOLUME_NAME)
-func getDesiredVolumes(pods []api.Pod) map[string]api.Volume {
+func getDesiredVolumes(pods []*api.Pod) map[string]api.Volume {
 desiredVolumes := make(map[string]api.Volume)
 for _, pod := range pods {
 for _, volume := range pod.Spec.Volumes {
@@ -1330,10 +1330,10 @@ func getDesiredVolumes(pods []api.Pod) map[string]api.Volume {
 return desiredVolumes
 }
 
-func (kl *Kubelet) cleanupOrphanedPods(pods []api.Pod) error {
+func (kl *Kubelet) cleanupOrphanedPods(pods []*api.Pod) error {
 desired := util.NewStringSet()
-for i := range pods {
-desired.Insert(string(pods[i].UID))
+for _, pod := range pods {
+desired.Insert(string(pod.UID))
 }
 found, err := kl.listPodsFromDisk()
 if err != nil {
@@ -1353,7 +1353,7 @@ func (kl *Kubelet) cleanupOrphanedPods(pods []api.Pod) error {
 
 // Compares the map of current volumes to the map of desired volumes.
 // If an active volume does not have a respective desired volume, clean it up.
-func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.Pod, running []*docker.Container) error {
+func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, running []*docker.Container) error {
 desiredVolumes := getDesiredVolumes(pods)
 currentVolumes := kl.getPodVolumesFromDisk()
 runningSet := util.StringSet{}
@@ -1388,7 +1388,7 @@ func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.Pod, running []*docker.Cont
 }
 
 // SyncPods synchronizes the configured list of pods (desired state) with the host current state.
-func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType,
+func (kl *Kubelet) SyncPods(allPods []*api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType,
 mirrorPods map[string]api.Pod, start time.Time) error {
 defer func() {
 metrics.SyncPodsLatency.Observe(metrics.SinceInMicroseconds(start))
@@ -1397,15 +1397,15 @@ func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metric
 // Remove obsolete entries in podStatus where the pod is no longer considered bound to this node.
 podFullNames := make(map[string]bool)
 for _, pod := range allPods {
-podFullNames[kubecontainer.GetPodFullName(&pod)] = true
+podFullNames[kubecontainer.GetPodFullName(pod)] = true
 }
 kl.statusManager.RemoveOrphanedStatuses(podFullNames)
 
 // Filter out the rejected pod. They don't have running containers.
 kl.handleNotFittingPods(allPods)
-var pods []api.Pod
+var pods []*api.Pod
 for _, pod := range allPods {
-status, ok := kl.statusManager.GetPodStatus(kubecontainer.GetPodFullName(&pod))
+status, ok := kl.statusManager.GetPodStatus(kubecontainer.GetPodFullName(pod))
 if ok && status.Phase == api.PodFailed {
 continue
 }
@@ -1423,8 +1423,7 @@ func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metric
 }
 
 // Check for any containers that need starting
-for ix := range pods {
-pod := &pods[ix]
+for _, pod := range pods {
 podFullName := kubecontainer.GetPodFullName(pod)
 uid := pod.UID
 desiredPods[uid] = empty{}
@@ -1504,7 +1503,7 @@ func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metric
 return err
 }
 
-type podsByCreationTime []api.Pod
+type podsByCreationTime []*api.Pod
 
 func (s podsByCreationTime) Len() int {
 return len(s)
@@ -1519,31 +1518,30 @@ func (s podsByCreationTime) Less(i, j int) bool {
 }
 
 // checkHostPortConflicts detects pods with conflicted host ports.
-func checkHostPortConflicts(pods []api.Pod) (fitting []api.Pod, notFitting []api.Pod) {
+func checkHostPortConflicts(pods []*api.Pod) (fitting []*api.Pod, notFitting []*api.Pod) {
 ports := map[int]bool{}
 extract := func(p *api.ContainerPort) int { return p.HostPort }
 
 // Respect the pod creation order when resolving conflicts.
 sort.Sort(podsByCreationTime(pods))
 
-for i := range pods {
-pod := &pods[i]
+for _, pod := range pods {
 if errs := validation.AccumulateUniquePorts(pod.Spec.Containers, ports, extract); len(errs) != 0 {
 glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", kubecontainer.GetPodFullName(pod), errs)
-notFitting = append(notFitting, *pod)
+notFitting = append(notFitting, pod)
 continue
 }
-fitting = append(fitting, *pod)
+fitting = append(fitting, pod)
 }
 return
 }
 
 // checkCapacityExceeded detects pods that exceeds node's resources.
-func (kl *Kubelet) checkCapacityExceeded(pods []api.Pod) (fitting []api.Pod, notFitting []api.Pod) {
+func (kl *Kubelet) checkCapacityExceeded(pods []*api.Pod) (fitting []*api.Pod, notFitting []*api.Pod) {
 info, err := kl.GetCachedMachineInfo()
 if err != nil {
 glog.Errorf("error getting machine info: %v", err)
-return pods, []api.Pod{}
+return pods, nil
 }
 
 // Respect the pod creation order when resolving conflicts.
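podsByCreationTime and checkHostPortConflicts work the same way on the pointer slice: sort.Interface does not care what the element type is, and Swap now exchanges two pointers instead of two large structs. A stand-in sketch of sorting a []*Pod (Pod here is again a small placeholder, not api.Pod):

package main

import (
	"fmt"
	"sort"
)

type Pod struct {
	Name    string
	Created int64
}

type podsByCreationTime []*Pod

func (s podsByCreationTime) Len() int           { return len(s) }
func (s podsByCreationTime) Swap(i, j int)      { s[i], s[j] = s[j], s[i] } // swaps pointers only
func (s podsByCreationTime) Less(i, j int) bool { return s[i].Created < s[j].Created }

func main() {
	pods := []*Pod{{"b", 2}, {"a", 1}}
	sort.Sort(podsByCreationTime(pods))
	fmt.Println(pods[0].Name) // "a"
}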
@@ -1554,14 +1552,14 @@ func (kl *Kubelet) checkCapacityExceeded(pods []api.Pod) (fitting []api.Pod, not
 }
 
 // checkNodeSelectorMatching detects pods that do not match node's labels.
-func (kl *Kubelet) checkNodeSelectorMatching(pods []api.Pod) (fitting []api.Pod, notFitting []api.Pod) {
+func (kl *Kubelet) checkNodeSelectorMatching(pods []*api.Pod) (fitting []*api.Pod, notFitting []*api.Pod) {
 node, err := kl.GetNode()
 if err != nil {
 glog.Errorf("error getting node: %v", err)
-return pods, []api.Pod{}
+return pods, nil
 }
 for _, pod := range pods {
-if !scheduler.PodMatchesNodeLabels(&pod, node) {
+if !scheduler.PodMatchesNodeLabels(pod, node) {
 notFitting = append(notFitting, pod)
 continue
 }
@@ -1572,25 +1570,25 @@ func (kl *Kubelet) checkNodeSelectorMatching(pods []api.Pod) (fitting []api.Pod,
 
 // handleNotfittingPods handles pods that do not fit on the node.
 // Currently conflicts on Port.HostPort values, matching node's labels and exceeding node's capacity are handled.
-func (kl *Kubelet) handleNotFittingPods(pods []api.Pod) {
+func (kl *Kubelet) handleNotFittingPods(pods []*api.Pod) {
 fitting, notFitting := checkHostPortConflicts(pods)
 for _, pod := range notFitting {
-kl.recorder.Eventf(&pod, "hostPortConflict", "Cannot start the pod due to host port conflict.")
-kl.statusManager.SetPodStatus(&pod, api.PodStatus{
+kl.recorder.Eventf(pod, "hostPortConflict", "Cannot start the pod due to host port conflict.")
+kl.statusManager.SetPodStatus(pod, api.PodStatus{
 Phase: api.PodFailed,
 Message: "Pod cannot be started due to host port conflict"})
 }
 fitting, notFitting = kl.checkNodeSelectorMatching(fitting)
 for _, pod := range notFitting {
-kl.recorder.Eventf(&pod, "nodeSelectorMismatching", "Cannot start the pod due to node selector mismatch.")
-kl.statusManager.SetPodStatus(&pod, api.PodStatus{
+kl.recorder.Eventf(pod, "nodeSelectorMismatching", "Cannot start the pod due to node selector mismatch.")
+kl.statusManager.SetPodStatus(pod, api.PodStatus{
 Phase: api.PodFailed,
 Message: "Pod cannot be started due to node selector mismatch"})
 }
 fitting, notFitting = kl.checkCapacityExceeded(fitting)
 for _, pod := range notFitting {
-kl.recorder.Eventf(&pod, "capacityExceeded", "Cannot start the pod due to exceeded capacity.")
-kl.statusManager.SetPodStatus(&pod, api.PodStatus{
+kl.recorder.Eventf(pod, "capacityExceeded", "Cannot start the pod due to exceeded capacity.")
+kl.statusManager.SetPodStatus(pod, api.PodStatus{
 Phase: api.PodFailed,
 Message: "Pod cannot be started due to exceeded capacity"})
 }
@@ -1716,7 +1714,7 @@ func (kl *Kubelet) GetHostIP() (net.IP, error) {
 
 // GetPods returns all pods bound to the kubelet and their spec, and the mirror
 // pods.
-func (kl *Kubelet) GetPods() []api.Pod {
+func (kl *Kubelet) GetPods() []*api.Pod {
 return kl.podManager.GetPods()
 }
 
@@ -459,7 +459,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
 waitGroup := testKubelet.waitGroup
 
 container := api.Container{Name: "bar"}
-pods := []api.Pod{
+pods := []*api.Pod{
 {
 ObjectMeta: api.ObjectMeta{
 UID: "12345678",
@@ -482,7 +482,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
 },
 {
 // pod infra container
-Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"},
+Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"},
 ID: "9876",
 },
 }
@@ -527,7 +527,7 @@ func TestSyncPodsWithTerminationLog(t *testing.T) {
 TerminationMessagePath: "/dev/somepath",
 }
 fakeDocker.ContainerList = []docker.APIContainers{}
-pods := []api.Pod{
+pods := []*api.Pod{
 {
 ObjectMeta: api.ObjectMeta{
 UID: "12345678",
@@ -586,7 +586,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
 waitGroup := testKubelet.waitGroup
 kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
 fakeDocker.ContainerList = []docker.APIContainers{}
-pods := []api.Pod{
+pods := []*api.Pod{
 {
 ObjectMeta: api.ObjectMeta{
 UID: "12345678",
@@ -649,7 +649,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
 puller.HasImages = []string{}
 kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
 fakeDocker.ContainerList = []docker.APIContainers{}
-pods := []api.Pod{
+pods := []*api.Pod{
 {
 ObjectMeta: api.ObjectMeta{
 UID: "12345678",
@@ -702,7 +702,7 @@ func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) {
 kubelet := testKubelet.kubelet
 fakeDocker := testKubelet.fakeDocker
 waitGroup := testKubelet.waitGroup
-pods := []api.Pod{
+pods := []*api.Pod{
 {
 ObjectMeta: api.ObjectMeta{
 UID: "12345678",
@@ -719,7 +719,7 @@ func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) {
 fakeDocker.ContainerList = []docker.APIContainers{
 {
 // pod infra container
-Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"},
+Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"},
 ID: "9876",
 },
 }
@@ -765,7 +765,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
|
|||||||
waitGroup := testKubelet.waitGroup
|
waitGroup := testKubelet.waitGroup
|
||||||
fakeHttp := fakeHTTP{}
|
fakeHttp := fakeHTTP{}
|
||||||
kubelet.httpClient = &fakeHttp
|
kubelet.httpClient = &fakeHttp
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -793,7 +793,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
|
|||||||
fakeDocker.ContainerList = []docker.APIContainers{
|
fakeDocker.ContainerList = []docker.APIContainers{
|
||||||
{
|
{
|
||||||
// pod infra container
|
// pod infra container
|
||||||
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"},
|
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"},
|
||||||
ID: "9876",
|
ID: "9876",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -841,7 +841,7 @@ func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) {
|
|||||||
fakeDocker := testKubelet.fakeDocker
|
fakeDocker := testKubelet.fakeDocker
|
||||||
waitGroup := testKubelet.waitGroup
|
waitGroup := testKubelet.waitGroup
|
||||||
|
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -880,7 +880,7 @@ func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>
|
// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>
|
||||||
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo2_new_87654321_0"},
|
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo2_new_87654321_0"},
|
||||||
ID: "8765",
|
ID: "8765",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -966,7 +966,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
|
|||||||
ID: "9876",
|
ID: "9876",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()); err != nil {
|
if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()); err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
// Validate nothing happened.
|
// Validate nothing happened.
|
||||||
@@ -974,7 +974,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
|
|||||||
fakeDocker.ClearCalls()
|
fakeDocker.ClearCalls()
|
||||||
|
|
||||||
ready = true
|
ready = true
|
||||||
if err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()); err != nil {
|
if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()); err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
verifyCalls(t, fakeDocker, []string{"list", "stop", "stop", "inspect_container", "inspect_container"})
|
verifyCalls(t, fakeDocker, []string{"list", "stop", "stop", "inspect_container", "inspect_container"})
|
||||||
@@ -1013,7 +1013,7 @@ func TestSyncPodsDeletes(t *testing.T) {
|
|||||||
ID: "4567",
|
ID: "4567",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now())
|
err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
@@ -1040,7 +1040,7 @@ func TestSyncPodsDeletesDuplicate(t *testing.T) {
|
|||||||
fakeDocker := testKubelet.fakeDocker
|
fakeDocker := testKubelet.fakeDocker
|
||||||
waitGroup := testKubelet.waitGroup
|
waitGroup := testKubelet.waitGroup
|
||||||
|
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -1063,7 +1063,7 @@ func TestSyncPodsDeletesDuplicate(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
// pod infra container
|
// pod infra container
|
||||||
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_bar_new_12345678_2222"},
|
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_bar_new_12345678_2222"},
|
||||||
ID: "9876",
|
ID: "9876",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1121,7 +1121,7 @@ func TestSyncPodsBadHash(t *testing.T) {
|
|||||||
fakeDocker := testKubelet.fakeDocker
|
fakeDocker := testKubelet.fakeDocker
|
||||||
waitGroup := testKubelet.waitGroup
|
waitGroup := testKubelet.waitGroup
|
||||||
|
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -1144,7 +1144,7 @@ func TestSyncPodsBadHash(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
// pod infra container
|
// pod infra container
|
||||||
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_42"},
|
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_42"},
|
||||||
ID: "9876",
|
ID: "9876",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -1192,7 +1192,7 @@ func TestSyncPodsUnhealthy(t *testing.T) {
|
|||||||
fakeDocker := testKubelet.fakeDocker
|
fakeDocker := testKubelet.fakeDocker
|
||||||
waitGroup := testKubelet.waitGroup
|
waitGroup := testKubelet.waitGroup
|
||||||
|
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -1219,7 +1219,7 @@ func TestSyncPodsUnhealthy(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
// pod infra container
|
// pod infra container
|
||||||
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_42"},
|
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_42"},
|
||||||
ID: "9876",
|
ID: "9876",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -1808,7 +1808,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
|
|||||||
err: fmt.Errorf("test error"),
|
err: fmt.Errorf("test error"),
|
||||||
}
|
}
|
||||||
|
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -1836,7 +1836,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
|
|||||||
fakeDocker.ContainerList = []docker.APIContainers{
|
fakeDocker.ContainerList = []docker.APIContainers{
|
||||||
{
|
{
|
||||||
// pod infra container
|
// pod infra container
|
||||||
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_42"},
|
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_42"},
|
||||||
ID: "9876",
|
ID: "9876",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -1892,7 +1892,7 @@ func TestSyncPodsWithPullPolicy(t *testing.T) {
|
|||||||
kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
|
kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
|
||||||
fakeDocker.ContainerList = []docker.APIContainers{}
|
fakeDocker.ContainerList = []docker.APIContainers{}
|
||||||
|
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -3005,7 +3005,7 @@ func TestPortForward(t *testing.T) {
|
|||||||
|
|
||||||
// Tests that identify the host port conflicts are detected correctly.
|
// Tests that identify the host port conflicts are detected correctly.
|
||||||
func TestGetHostPortConflicts(t *testing.T) {
|
func TestGetHostPortConflicts(t *testing.T) {
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
|
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
|
||||||
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
|
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
|
||||||
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
|
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
|
||||||
@@ -3018,11 +3018,11 @@ func TestGetHostPortConflicts(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// The new pod should cause conflict and be reported.
|
// The new pod should cause conflict and be reported.
|
||||||
expected := api.Pod{
|
expected := &api.Pod{
|
||||||
Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
|
Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
|
||||||
}
|
}
|
||||||
pods = append(pods, expected)
|
pods = append(pods, expected)
|
||||||
if _, actual := checkHostPortConflicts(pods); !reflect.DeepEqual(actual, []api.Pod{expected}) {
|
if _, actual := checkHostPortConflicts(pods); !reflect.DeepEqual(actual, []*api.Pod{expected}) {
|
||||||
t.Errorf("expected %#v, Got %#v", expected, actual)
|
t.Errorf("expected %#v, Got %#v", expected, actual)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -3034,7 +3034,7 @@ func TestHandlePortConflicts(t *testing.T) {
|
|||||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
||||||
|
|
||||||
spec := api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
|
spec := api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "123456789",
|
UID: "123456789",
|
||||||
@@ -3056,7 +3056,7 @@ func TestHandlePortConflicts(t *testing.T) {
|
|||||||
pods[1].CreationTimestamp = util.NewTime(time.Now())
|
pods[1].CreationTimestamp = util.NewTime(time.Now())
|
||||||
pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second))
|
pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second))
|
||||||
// The newer pod should be rejected.
|
// The newer pod should be rejected.
|
||||||
conflictedPodName := kubecontainer.GetPodFullName(&pods[0])
|
conflictedPodName := kubecontainer.GetPodFullName(pods[0])
|
||||||
|
|
||||||
kl.handleNotFittingPods(pods)
|
kl.handleNotFittingPods(pods)
|
||||||
// Check pod status stored in the status map.
|
// Check pod status stored in the status map.
|
||||||
@@ -3087,7 +3087,7 @@ func TestHandleNodeSelector(t *testing.T) {
|
|||||||
{ObjectMeta: api.ObjectMeta{Name: "testnode", Labels: map[string]string{"key": "B"}}},
|
{ObjectMeta: api.ObjectMeta{Name: "testnode", Labels: map[string]string{"key": "B"}}},
|
||||||
}}
|
}}
|
||||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "123456789",
|
UID: "123456789",
|
||||||
@@ -3106,7 +3106,7 @@ func TestHandleNodeSelector(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
// The first pod should be rejected.
|
// The first pod should be rejected.
|
||||||
notfittingPodName := kubecontainer.GetPodFullName(&pods[0])
|
notfittingPodName := kubecontainer.GetPodFullName(pods[0])
|
||||||
|
|
||||||
kl.handleNotFittingPods(pods)
|
kl.handleNotFittingPods(pods)
|
||||||
// Check pod status stored in the status map.
|
// Check pod status stored in the status map.
|
||||||
@@ -3140,7 +3140,7 @@ func TestHandleMemExceeded(t *testing.T) {
|
|||||||
"memory": resource.MustParse("90"),
|
"memory": resource.MustParse("90"),
|
||||||
},
|
},
|
||||||
}}}}
|
}}}}
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "123456789",
|
UID: "123456789",
|
||||||
@@ -3162,7 +3162,7 @@ func TestHandleMemExceeded(t *testing.T) {
|
|||||||
pods[1].CreationTimestamp = util.NewTime(time.Now())
|
pods[1].CreationTimestamp = util.NewTime(time.Now())
|
||||||
pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second))
|
pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second))
|
||||||
// The newer pod should be rejected.
|
// The newer pod should be rejected.
|
||||||
notfittingPodName := kubecontainer.GetPodFullName(&pods[0])
|
notfittingPodName := kubecontainer.GetPodFullName(pods[0])
|
||||||
|
|
||||||
kl.handleNotFittingPods(pods)
|
kl.handleNotFittingPods(pods)
|
||||||
// Check pod status stored in the status map.
|
// Check pod status stored in the status map.
|
||||||
@@ -3191,7 +3191,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
|
|||||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
||||||
|
|
||||||
kl := testKubelet.kubelet
|
kl := testKubelet.kubelet
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "pod1"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
|
{ObjectMeta: api.ObjectMeta{Name: "pod1"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "pod2"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
|
{ObjectMeta: api.ObjectMeta{Name: "pod2"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
|
||||||
}
|
}
|
||||||
@@ -3201,7 +3201,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
|
|||||||
t.Fatalf("expected to have status cached for %q: %v", "pod2", err)
|
t.Fatalf("expected to have status cached for %q: %v", "pod2", err)
|
||||||
}
|
}
|
||||||
// Sync with empty pods so that the entry in status map will be removed.
|
// Sync with empty pods so that the entry in status map will be removed.
|
||||||
kl.SyncPods([]api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now())
|
kl.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now())
|
||||||
if _, err := kl.GetPodStatus(kubecontainer.BuildPodFullName("pod2", "")); err == nil {
|
if _, err := kl.GetPodStatus(kubecontainer.BuildPodFullName("pod2", "")); err == nil {
|
||||||
t.Fatalf("expected to not have status cached for %q: %v", "pod2", err)
|
t.Fatalf("expected to not have status cached for %q: %v", "pod2", err)
|
||||||
}
|
}
|
||||||
@@ -3483,7 +3483,7 @@ func TestCreateMirrorPod(t *testing.T) {
|
|||||||
testKubelet := newTestKubelet(t)
|
testKubelet := newTestKubelet(t)
|
||||||
kl := testKubelet.kubelet
|
kl := testKubelet.kubelet
|
||||||
manager := testKubelet.fakeMirrorClient
|
manager := testKubelet.fakeMirrorClient
|
||||||
pod := api.Pod{
|
pod := &api.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
Name: "bar",
|
Name: "bar",
|
||||||
@@ -3493,13 +3493,13 @@ func TestCreateMirrorPod(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
pods := []api.Pod{pod}
|
pods := []*api.Pod{pod}
|
||||||
kl.podManager.SetPods(pods)
|
kl.podManager.SetPods(pods)
|
||||||
err := kl.syncPod(&pod, nil, container.Pod{})
|
err := kl.syncPod(pod, nil, container.Pod{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
podFullName := kubecontainer.GetPodFullName(&pod)
|
podFullName := kubecontainer.GetPodFullName(pod)
|
||||||
if !manager.HasPod(podFullName) {
|
if !manager.HasPod(podFullName) {
|
||||||
t.Errorf("expected mirror pod %q to be created", podFullName)
|
t.Errorf("expected mirror pod %q to be created", podFullName)
|
||||||
}
|
}
|
||||||
@@ -3513,7 +3513,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
|
|||||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
||||||
kl := testKubelet.kubelet
|
kl := testKubelet.kubelet
|
||||||
manager := testKubelet.fakeMirrorClient
|
manager := testKubelet.fakeMirrorClient
|
||||||
pod := api.Pod{
|
pod := &api.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
@@ -3529,7 +3529,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
// Mirror pod has an outdated spec.
|
// Mirror pod has an outdated spec.
|
||||||
mirrorPod := api.Pod{
|
mirrorPod := &api.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "11111111",
|
UID: "11111111",
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
@@ -3546,13 +3546,13 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
pods := []api.Pod{pod, mirrorPod}
|
pods := []*api.Pod{pod, mirrorPod}
|
||||||
kl.podManager.SetPods(pods)
|
kl.podManager.SetPods(pods)
|
||||||
err := kl.syncPod(&pod, &mirrorPod, container.Pod{})
|
err := kl.syncPod(pod, mirrorPod, container.Pod{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
name := kubecontainer.GetPodFullName(&pod)
|
name := kubecontainer.GetPodFullName(pod)
|
||||||
creates, deletes := manager.GetCounts(name)
|
creates, deletes := manager.GetCounts(name)
|
||||||
if creates != 0 || deletes != 1 {
|
if creates != 0 || deletes != 1 {
|
||||||
t.Errorf("expected 0 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
|
t.Errorf("expected 0 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
|
||||||
@@ -3564,7 +3564,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
|
|||||||
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
|
||||||
kl := testKubelet.kubelet
|
kl := testKubelet.kubelet
|
||||||
manager := testKubelet.fakeMirrorClient
|
manager := testKubelet.fakeMirrorClient
|
||||||
orphanPods := []api.Pod{
|
orphanPods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -3600,7 +3600,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
|
|||||||
t.Errorf("expected zero mirror pods, got %v", manager.GetPods())
|
t.Errorf("expected zero mirror pods, got %v", manager.GetPods())
|
||||||
}
|
}
|
||||||
for _, pod := range orphanPods {
|
for _, pod := range orphanPods {
|
||||||
name := kubecontainer.GetPodFullName(&pod)
|
name := kubecontainer.GetPodFullName(pod)
|
||||||
creates, deletes := manager.GetCounts(name)
|
creates, deletes := manager.GetCounts(name)
|
||||||
if creates != 0 || deletes != 1 {
|
if creates != 0 || deletes != 1 {
|
||||||
t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
|
t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
|
||||||
@@ -3611,7 +3611,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
|
|||||||
func TestGetContainerInfoForMirrorPods(t *testing.T) {
|
func TestGetContainerInfoForMirrorPods(t *testing.T) {
|
||||||
// pods contain one static and one mirror pod with the same name but
|
// pods contain one static and one mirror pod with the same name but
|
||||||
// different UIDs.
|
// different UIDs.
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "1234",
|
UID: "1234",
|
||||||
@@ -3685,7 +3685,7 @@ func TestDoNotCacheStatusForStaticPods(t *testing.T) {
|
|||||||
kubelet := testKubelet.kubelet
|
kubelet := testKubelet.kubelet
|
||||||
waitGroup := testKubelet.waitGroup
|
waitGroup := testKubelet.waitGroup
|
||||||
|
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -3709,7 +3709,7 @@ func TestDoNotCacheStatusForStaticPods(t *testing.T) {
|
|||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
waitGroup.Wait()
|
waitGroup.Wait()
|
||||||
podFullName := kubecontainer.GetPodFullName(&pods[0])
|
podFullName := kubecontainer.GetPodFullName(pods[0])
|
||||||
status, ok := kubelet.statusManager.GetPodStatus(podFullName)
|
status, ok := kubelet.statusManager.GetPodStatus(podFullName)
|
||||||
if ok {
|
if ok {
|
||||||
t.Errorf("unexpected status %#v found for static pod %q", status, podFullName)
|
t.Errorf("unexpected status %#v found for static pod %q", status, podFullName)
|
||||||
@@ -3739,7 +3739,7 @@ func TestHostNetworkAllowed(t *testing.T) {
|
|||||||
HostNetwork: true,
|
HostNetwork: true,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
kubelet.podManager.SetPods([]api.Pod{*pod})
|
kubelet.podManager.SetPods([]*api.Pod{pod})
|
||||||
err := kubelet.syncPod(pod, nil, container.Pod{})
|
err := kubelet.syncPod(pod, nil, container.Pod{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("expected pod infra creation to succeed: %v", err)
|
t.Errorf("expected pod infra creation to succeed: %v", err)
|
||||||
@@ -3786,7 +3786,7 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) {
|
|||||||
{Name: "succeeded"},
|
{Name: "succeeded"},
|
||||||
{Name: "failed"},
|
{Name: "failed"},
|
||||||
}
|
}
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -3802,7 +3802,7 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) {
|
|||||||
runningAPIContainers := []docker.APIContainers{
|
runningAPIContainers := []docker.APIContainers{
|
||||||
{
|
{
|
||||||
// pod infra container
|
// pod infra container
|
||||||
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"},
|
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"},
|
||||||
ID: "9876",
|
ID: "9876",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -4019,7 +4019,7 @@ func TestGetPodStatusWithLastTermination(t *testing.T) {
|
|||||||
fakeDocker.ExitedContainerList = exitedAPIContainers
|
fakeDocker.ExitedContainerList = exitedAPIContainers
|
||||||
fakeDocker.ContainerMap = containerMap
|
fakeDocker.ContainerMap = containerMap
|
||||||
fakeDocker.ClearCalls()
|
fakeDocker.ClearCalls()
|
||||||
pods := []api.Pod{
|
pods := []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@@ -4035,7 +4035,7 @@ func TestGetPodStatusWithLastTermination(t *testing.T) {
|
|||||||
fakeDocker.ContainerList = []docker.APIContainers{
|
fakeDocker.ContainerList = []docker.APIContainers{
|
||||||
{
|
{
|
||||||
// pod infra container
|
// pod infra container
|
||||||
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"},
|
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"},
|
||||||
ID: "9876",
|
ID: "9876",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -4048,7 +4048,7 @@ func TestGetPodStatusWithLastTermination(t *testing.T) {
|
|||||||
waitGroup.Wait()
|
waitGroup.Wait()
|
||||||
|
|
||||||
// Check if we can retrieve the pod status from GetPodStatus().
|
// Check if we can retrieve the pod status from GetPodStatus().
|
||||||
podName := kubecontainer.GetPodFullName(&pods[0])
|
podName := kubecontainer.GetPodFullName(pods[0])
|
||||||
status, err := kubelet.GetPodStatus(podName)
|
status, err := kubelet.GetPodStatus(podName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unable to retrieve pod status for pod %q: %#v.", podName, err)
|
t.Fatalf("unable to retrieve pod status for pod %q: %#v.", podName, err)
|
||||||
@@ -4084,7 +4084,7 @@ func TestGetPodCreationFailureReason(t *testing.T) {
|
|||||||
"create": fmt.Errorf("%s", failureReason),
|
"create": fmt.Errorf("%s", failureReason),
|
||||||
}
|
}
|
||||||
fakeDocker.ContainerList = []docker.APIContainers{}
|
fakeDocker.ContainerList = []docker.APIContainers{}
|
||||||
pod := api.Pod{
|
pod := &api.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
Name: "bar",
|
Name: "bar",
|
||||||
@@ -4096,13 +4096,13 @@ func TestGetPodCreationFailureReason(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
pods := []api.Pod{pod}
|
pods := []*api.Pod{pod}
|
||||||
kubelet.podManager.SetPods(pods)
|
kubelet.podManager.SetPods(pods)
|
||||||
_, err := kubelet.runContainer(&pod, &pod.Spec.Containers[0], make(map[string]volume.Volume), "", "")
|
_, err := kubelet.runContainer(pod, &pod.Spec.Containers[0], make(map[string]volume.Volume), "", "")
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Errorf("expected error, found nil")
|
t.Errorf("expected error, found nil")
|
||||||
}
|
}
|
||||||
status, err := kubelet.GetPodStatus(kubecontainer.GetPodFullName(&pod))
|
status, err := kubelet.GetPodStatus(kubecontainer.GetPodFullName(pod))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error %v", err)
|
t.Errorf("unexpected error %v", err)
|
||||||
}
|
}
|
||||||
|
@@ -43,11 +43,11 @@ import (
|
|||||||
// also be removed.
|
// also be removed.
|
||||||
|
|
||||||
type podManager interface {
|
type podManager interface {
|
||||||
GetPods() []api.Pod
|
GetPods() []*api.Pod
|
||||||
GetPodByFullName(podFullName string) (*api.Pod, bool)
|
GetPodByFullName(podFullName string) (*api.Pod, bool)
|
||||||
GetPodByName(namespace, name string) (*api.Pod, bool)
|
GetPodByName(namespace, name string) (*api.Pod, bool)
|
||||||
GetPodsAndMirrorMap() ([]api.Pod, map[string]api.Pod)
|
GetPodsAndMirrorMap() ([]*api.Pod, map[string]api.Pod)
|
||||||
SetPods(pods []api.Pod)
|
SetPods(pods []*api.Pod)
|
||||||
UpdatePods(u PodUpdate, podSyncTypes map[types.UID]metrics.SyncPodType)
|
UpdatePods(u PodUpdate, podSyncTypes map[types.UID]metrics.SyncPodType)
|
||||||
DeleteOrphanedMirrorPods()
|
DeleteOrphanedMirrorPods()
|
||||||
TranslatePodUID(uid types.UID) types.UID
|
TranslatePodUID(uid types.UID) types.UID
|
||||||
@@ -78,7 +78,7 @@ type basicPodManager struct {
|
|||||||
func newBasicPodManager(apiserverClient client.Interface) *basicPodManager {
|
func newBasicPodManager(apiserverClient client.Interface) *basicPodManager {
|
||||||
pm := &basicPodManager{}
|
pm := &basicPodManager{}
|
||||||
pm.mirrorClient = newBasicMirrorClient(apiserverClient)
|
pm.mirrorClient = newBasicMirrorClient(apiserverClient)
|
||||||
pm.SetPods([]api.Pod{})
|
pm.SetPods(nil)
|
||||||
return pm
|
return pm
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -127,27 +127,26 @@ func (self *basicPodManager) UpdatePods(u PodUpdate, podSyncTypes map[types.UID]
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Set the internal pods based on the new pods.
|
// Set the internal pods based on the new pods.
|
||||||
func (self *basicPodManager) SetPods(newPods []api.Pod) {
|
func (self *basicPodManager) SetPods(newPods []*api.Pod) {
|
||||||
self.lock.Lock()
|
self.lock.Lock()
|
||||||
defer self.lock.Unlock()
|
defer self.lock.Unlock()
|
||||||
self.setPods(newPods)
|
self.setPods(newPods)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *basicPodManager) setPods(newPods []api.Pod) {
|
func (self *basicPodManager) setPods(newPods []*api.Pod) {
|
||||||
podByUID := make(map[types.UID]*api.Pod)
|
podByUID := make(map[types.UID]*api.Pod)
|
||||||
mirrorPodByUID := make(map[types.UID]*api.Pod)
|
mirrorPodByUID := make(map[types.UID]*api.Pod)
|
||||||
podByFullName := make(map[string]*api.Pod)
|
podByFullName := make(map[string]*api.Pod)
|
||||||
mirrorPodByFullName := make(map[string]*api.Pod)
|
mirrorPodByFullName := make(map[string]*api.Pod)
|
||||||
|
|
||||||
for i := range newPods {
|
for _, pod := range newPods {
|
||||||
pod := newPods[i]
|
podFullName := kubecontainer.GetPodFullName(pod)
|
||||||
podFullName := kubecontainer.GetPodFullName(&pod)
|
if isMirrorPod(pod) {
|
||||||
if isMirrorPod(&pod) {
|
mirrorPodByUID[pod.UID] = pod
|
||||||
mirrorPodByUID[pod.UID] = &pod
|
mirrorPodByFullName[podFullName] = pod
|
||||||
mirrorPodByFullName[podFullName] = &pod
|
|
||||||
} else {
|
} else {
|
||||||
podByUID[pod.UID] = &pod
|
podByUID[pod.UID] = pod
|
||||||
podByFullName[podFullName] = &pod
|
podByFullName[podFullName] = pod
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -157,21 +156,19 @@ func (self *basicPodManager) setPods(newPods []api.Pod) {
|
|||||||
self.mirrorPodByFullName = mirrorPodByFullName
|
self.mirrorPodByFullName = mirrorPodByFullName
|
||||||
}
|
}
|
||||||
|
|
||||||
func applyUpdates(changed []api.Pod, current []api.Pod) []api.Pod {
|
func applyUpdates(changed []*api.Pod, current []*api.Pod) []*api.Pod {
|
||||||
updated := []api.Pod{}
|
updated := []*api.Pod{}
|
||||||
m := map[types.UID]*api.Pod{}
|
m := map[types.UID]*api.Pod{}
|
||||||
for i := range changed {
|
for _, pod := range changed {
|
||||||
pod := &changed[i]
|
|
||||||
m[pod.UID] = pod
|
m[pod.UID] = pod
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range current {
|
for _, pod := range current {
|
||||||
pod := ¤t[i]
|
|
||||||
if m[pod.UID] != nil {
|
if m[pod.UID] != nil {
|
||||||
updated = append(updated, *m[pod.UID])
|
updated = append(updated, m[pod.UID])
|
||||||
glog.V(4).Infof("pod with UID: %q has a new spec %+v", pod.UID, *m[pod.UID])
|
glog.V(4).Infof("pod with UID: %q has a new spec %+v", pod.UID, *m[pod.UID])
|
||||||
} else {
|
} else {
|
||||||
updated = append(updated, *pod)
|
updated = append(updated, pod)
|
||||||
glog.V(4).Infof("pod with UID: %q stay with the same spec %+v", pod.UID, *pod)
|
glog.V(4).Infof("pod with UID: %q stay with the same spec %+v", pod.UID, *pod)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
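The setPods rewrite above (and applyUpdates in the next hunk) shows the other motivation for carrying []*api.Pod through the kubelet: the old code had to copy each element into a fresh local (pod := newPods[i]) before it could safely take its address, while the new code stores the element's pointer directly. A small illustrative sketch with a hypothetical trimmed Pod type, not the real api.Pod:

package main

import "fmt"

type Pod struct{ UID string }

func main() {
    valuePods := []Pod{{UID: "a"}, {UID: "b"}}

    // Old style: with a value slice, each element is copied into a fresh
    // local before its address is taken. On the Go versions current when
    // this patch was written, taking &pod of the range variable itself
    // would have stored the same reused address for every key.
    oldStyle := map[string]*Pod{}
    for i := range valuePods {
        pod := valuePods[i] // one struct copy per pod
        oldStyle[pod.UID] = &pod
    }

    // New style: with a pointer slice the element already is a stable
    // *Pod, so it can be stored with no copy at all.
    ptrPods := []*Pod{{UID: "a"}, {UID: "b"}}
    newStyle := map[string]*Pod{}
    for _, pod := range ptrPods {
        newStyle[pod.UID] = pod
    }

    fmt.Println(oldStyle["a"].UID, newStyle["a"].UID) // a a
}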
@@ -180,20 +177,20 @@ func applyUpdates(changed []api.Pod, current []api.Pod) []api.Pod {
 }
 
 // GetPods returns the regular pods bound to the kubelet and their spec.
-func (self *basicPodManager) GetPods() []api.Pod {
+func (self *basicPodManager) GetPods() []*api.Pod {
 self.lock.RLock()
 defer self.lock.RUnlock()
 return podsMapToPods(self.podByUID)
 }
 
 // Returns all pods (including mirror pods).
-func (self *basicPodManager) getAllPods() []api.Pod {
+func (self *basicPodManager) getAllPods() []*api.Pod {
 return append(podsMapToPods(self.podByUID), podsMapToPods(self.mirrorPodByUID)...)
 }
 
 // GetPodsAndMirrorMap returns the a copy of the regular pods and the mirror
 // pods indexed by full name.
-func (self *basicPodManager) GetPodsAndMirrorMap() ([]api.Pod, map[string]api.Pod) {
+func (self *basicPodManager) GetPodsAndMirrorMap() ([]*api.Pod, map[string]api.Pod) {
 self.lock.RLock()
 defer self.lock.RUnlock()
 mirrorPods := make(map[string]api.Pod)
@@ -270,10 +267,10 @@ func (self *basicPodManager) IsMirrorPodOf(mirrorPod, pod *api.Pod) bool {
 return api.Semantic.DeepEqual(&pod.Spec, &mirrorPod.Spec)
 }
 
-func podsMapToPods(UIDMap map[types.UID]*api.Pod) []api.Pod {
-pods := make([]api.Pod, 0, len(UIDMap))
+func podsMapToPods(UIDMap map[types.UID]*api.Pod) []*api.Pod {
+pods := make([]*api.Pod, 0, len(UIDMap))
 for _, pod := range UIDMap {
-pods = append(pods, *pod)
+pods = append(pods, pod)
 }
 return pods
 }
@@ -34,7 +34,7 @@ func newFakePodManager() (*basicPodManager, *fakeMirrorClient) {
 // Tests that pods/maps are properly set after the pod update, and the basic
 // methods work correctly.
 func TestGetSetPods(t *testing.T) {
-mirrorPod := api.Pod{
+mirrorPod := &api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "987654321",
 Name: "bar",
@@ -45,7 +45,7 @@ func TestGetSetPods(t *testing.T) {
 },
 },
 }
-staticPod := api.Pod{
+staticPod := &api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "123456789",
 Name: "bar",
@@ -54,7 +54,7 @@ func TestGetSetPods(t *testing.T) {
 },
 }
 
-expectedPods := []api.Pod{
+expectedPods := []*api.Pod{
 {
 ObjectMeta: api.ObjectMeta{
 UID: "999999999",
@@ -98,11 +98,11 @@ func TestGetSetPods(t *testing.T) {
 
 // Test the basic Get methods.
 actualPod, ok := podManager.GetPodByFullName("bar_default")
-if !ok || !reflect.DeepEqual(actualPod, &staticPod) {
+if !ok || !reflect.DeepEqual(actualPod, staticPod) {
 t.Errorf("unable to get pod by full name; expected: %#v, got: %#v", staticPod, actualPod)
 }
 actualPod, ok = podManager.GetPodByName("default", "bar")
-if !ok || !reflect.DeepEqual(actualPod, &staticPod) {
+if !ok || !reflect.DeepEqual(actualPod, staticPod) {
 t.Errorf("unable to get pod by name; expected: %#v, got: %#v", staticPod, actualPod)
 }
 
@@ -51,16 +51,15 @@ func (kl *Kubelet) RunOnce(updates <-chan PodUpdate) ([]RunPodResult, error) {
 }
 
 // runOnce runs a given set of pods and returns their status.
-func (kl *Kubelet) runOnce(pods []api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
+func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
 kl.handleNotFittingPods(pods)
 
 ch := make(chan RunPodResult)
-for i := range pods {
-pod := pods[i] // Make a copy
-go func() {
+for _, pod := range pods {
+go func(pod *api.Pod) {
 err := kl.runPod(pod, retryDelay)
-ch <- RunPodResult{&pod, err}
-}()
+ch <- RunPodResult{pod, err}
+}(pod)
 }
 
 glog.Infof("waiting for %d pods", len(pods))
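The runOnce hunk above also changes how each per-pod goroutine receives its pod: rather than copying the loop variable and closing over the copy, the pointer is passed as an argument, so every goroutine gets its own value regardless of what the loop does next. A minimal sketch of that pattern (hypothetical Pod type, not the kubelet's real types):

package main

import (
    "fmt"
    "sync"
)

type Pod struct{ Name string }

func main() {
    pods := []*Pod{{Name: "foo"}, {Name: "bar"}}
    var wg sync.WaitGroup

    for _, pod := range pods {
        wg.Add(1)
        // Passing pod as a parameter gives the goroutine its own copy of
        // the pointer, mirroring the go func(pod *api.Pod){...}(pod) form
        // used in the patch above.
        go func(pod *Pod) {
            defer wg.Done()
            fmt.Println("syncing", pod.Name)
        }(pod)
    }
    wg.Wait()
}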
@@ -84,7 +83,7 @@ func (kl *Kubelet) runOnce(pods []api.Pod, retryDelay time.Duration) (results []
 }
 
 // runPod runs a single pod and wait until all containers are running.
-func (kl *Kubelet) runPod(pod api.Pod, retryDelay time.Duration) error {
+func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
 delay := retryDelay
 retry := 0
 for {
@@ -104,7 +103,7 @@ func (kl *Kubelet) runPod(pod api.Pod, retryDelay time.Duration) error {
 glog.Infof("pod %q containers not running: syncing", pod.Name)
 // We don't create mirror pods in this mode; pass a dummy boolean value
 // to sycnPod.
-if err = kl.syncPod(&pod, nil, p); err != nil {
+if err = kl.syncPod(pod, nil, p); err != nil {
 return fmt.Errorf("error syncing pod: %v", err)
 }
 if retry >= RunOnceMaxRetries {
@@ -119,7 +118,7 @@ func (kl *Kubelet) runPod(pod api.Pod, retryDelay time.Duration) error {
 }
 
 // isPodRunning returns true if all containers of a manifest are running.
-func (kl *Kubelet) isPodRunning(pod api.Pod, runningPod container.Pod) (bool, error) {
+func (kl *Kubelet) isPodRunning(pod *api.Pod, runningPod container.Pod) (bool, error) {
 for _, container := range pod.Spec.Containers {
 c := runningPod.FindContainerByName(container.Name)
 if c == nil {
@@ -148,7 +148,7 @@ func TestRunOnce(t *testing.T) {
 kb.containerManager = dockertools.NewDockerManager(kb.dockerClient, kb.recorder, dockertools.PodInfraContainerImage, 0, 0)
 kb.containerManager.Puller = &dockertools.FakeDockerPuller{}
 
-pods := []api.Pod{
+pods := []*api.Pod{
 {
 ObjectMeta: api.ObjectMeta{
 UID: "12345678",
@@ -102,7 +102,7 @@ type HostInterface interface {
 GetRootInfo(req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
 GetDockerVersion() ([]uint, error)
 GetCachedMachineInfo() (*cadvisorApi.MachineInfo, error)
-GetPods() []api.Pod
+GetPods() []*api.Pod
 GetPodByName(namespace, name string) (*api.Pod, bool)
 GetPodStatus(name string) (api.PodStatus, error)
 RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error)
@@ -279,8 +279,9 @@ func (s *Server) handleContainerLogs(w http.ResponseWriter, req *http.Request) {
 // handlePods returns a list of pod bound to the Kubelet and their spec
 func (s *Server) handlePods(w http.ResponseWriter, req *http.Request) {
 pods := s.host.GetPods()
-podList := &api.PodList{
-Items: pods,
+podList := new(api.PodList)
+for _, pod := range pods {
+podList.Items = append(podList.Items, *pod)
 }
 data, err := latest.Codec.Encode(podList)
 if err != nil {
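Where the HTTP handler above still has to produce an api.PodList, whose Items field holds pod values rather than pointers, the pointers are dereferenced back into the list one by one. A rough sketch of the same conversion with a hypothetical trimmed-down PodList (the pre-sized Items slice is an optional refinement, not something the patch does):

package main

import "fmt"

type Pod struct{ Name string }

// PodList mirrors the shape of api.PodList: Items holds values, so a
// []*Pod has to be dereferenced element by element to fill it.
type PodList struct {
    Items []Pod
}

func toPodList(pods []*Pod) PodList {
    list := PodList{Items: make([]Pod, 0, len(pods))}
    for _, pod := range pods {
        list.Items = append(list.Items, *pod) // one copy per pod, only at the API boundary
    }
    return list
}

func main() {
    fmt.Println(toPodList([]*Pod{{Name: "foo"}, {Name: "bar"}}))
}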
@@ -44,7 +44,7 @@ type fakeKubelet struct {
|
|||||||
containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
|
containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
|
||||||
rootInfoFunc func(query *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
|
rootInfoFunc func(query *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
|
||||||
machineInfoFunc func() (*cadvisorApi.MachineInfo, error)
|
machineInfoFunc func() (*cadvisorApi.MachineInfo, error)
|
||||||
podsFunc func() []api.Pod
|
podsFunc func() []*api.Pod
|
||||||
logFunc func(w http.ResponseWriter, req *http.Request)
|
logFunc func(w http.ResponseWriter, req *http.Request)
|
||||||
runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
|
runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
|
||||||
dockerVersionFunc func() ([]uint, error)
|
dockerVersionFunc func() ([]uint, error)
|
||||||
@@ -79,7 +79,7 @@ func (fk *fakeKubelet) GetCachedMachineInfo() (*cadvisorApi.MachineInfo, error)
|
|||||||
return fk.machineInfoFunc()
|
return fk.machineInfoFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fk *fakeKubelet) GetPods() []api.Pod {
|
func (fk *fakeKubelet) GetPods() []*api.Pod {
|
||||||
return fk.podsFunc()
|
return fk.podsFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -64,7 +64,7 @@ const (
|
|||||||
// functionally similar, this helps our unit tests properly check that the correct PodUpdates
|
// functionally similar, this helps our unit tests properly check that the correct PodUpdates
|
||||||
// are generated.
|
// are generated.
|
||||||
type PodUpdate struct {
|
type PodUpdate struct {
|
||||||
Pods []api.Pod
|
Pods []*api.Pod
|
||||||
Op PodOperation
|
Op PodOperation
|
||||||
Source string
|
Source string
|
||||||
}
|
}
|
||||||
|
@@ -30,7 +30,7 @@ import (
|
|||||||
type FailedPredicateMap map[string]util.StringSet
|
type FailedPredicateMap map[string]util.StringSet
|
||||||
|
|
||||||
type FitError struct {
|
type FitError struct {
|
||||||
Pod api.Pod
|
Pod *api.Pod
|
||||||
FailedPredicates FailedPredicateMap
|
FailedPredicates FailedPredicateMap
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -51,7 +51,7 @@ type genericScheduler struct {
|
|||||||
randomLock sync.Mutex
|
randomLock sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *genericScheduler) Schedule(pod api.Pod, minionLister MinionLister) (string, error) {
|
func (g *genericScheduler) Schedule(pod *api.Pod, minionLister MinionLister) (string, error) {
|
||||||
minions, err := minionLister.List()
|
minions, err := minionLister.List()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@@ -97,7 +97,7 @@ func (g *genericScheduler) selectHost(priorityList HostPriorityList) (string, er
|
|||||||
|
|
||||||
// Filters the minions to find the ones that fit based on the given predicate functions
|
// Filters the minions to find the ones that fit based on the given predicate functions
|
||||||
// Each minion is passed through the predicate functions to determine if it is a fit
|
// Each minion is passed through the predicate functions to determine if it is a fit
|
||||||
func findNodesThatFit(pod api.Pod, podLister PodLister, predicates map[string]FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) {
|
func findNodesThatFit(pod *api.Pod, podLister PodLister, predicates map[string]FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) {
|
||||||
filtered := []api.Node{}
|
filtered := []api.Node{}
|
||||||
machineToPods, err := MapPodsToMachines(podLister)
|
machineToPods, err := MapPodsToMachines(podLister)
|
||||||
failedPredicateMap := FailedPredicateMap{}
|
failedPredicateMap := FailedPredicateMap{}
|
||||||
@@ -133,7 +133,7 @@ func findNodesThatFit(pod api.Pod, podLister PodLister, predicates map[string]Fi
|
|||||||
// Each priority function can also have its own weight
|
// Each priority function can also have its own weight
|
||||||
// The minion scores returned by the priority function are multiplied by the weights to get weighted scores
|
// The minion scores returned by the priority function are multiplied by the weights to get weighted scores
|
||||||
// All scores are finally combined (added) to get the total weighted scores of all minions
|
// All scores are finally combined (added) to get the total weighted scores of all minions
|
||||||
func prioritizeNodes(pod api.Pod, podLister PodLister, priorityConfigs []PriorityConfig, minionLister MinionLister) (HostPriorityList, error) {
|
func prioritizeNodes(pod *api.Pod, podLister PodLister, priorityConfigs []PriorityConfig, minionLister MinionLister) (HostPriorityList, error) {
|
||||||
result := HostPriorityList{}
|
result := HostPriorityList{}
|
||||||
|
|
||||||
// If no priority configs are provided, then the EqualPriority function is applied
|
// If no priority configs are provided, then the EqualPriority function is applied
|
||||||
@@ -177,7 +177,7 @@ func getBestHosts(list HostPriorityList) []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
|
// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
|
||||||
func EqualPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
|
func EqualPriority(_ *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
|
||||||
nodes, err := minionLister.List()
|
nodes, err := minionLister.List()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Errorf("failed to list nodes: %v", err)
|
fmt.Errorf("failed to list nodes: %v", err)
|
||||||
|
@@ -27,19 +27,19 @@ import (
|
|||||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
func falsePredicate(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
|
func falsePredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func truePredicate(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
|
func truePredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func matchesPredicate(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
|
func matchesPredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
|
||||||
return pod.Name == node, nil
|
return pod.Name == node, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func numericPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
|
func numericPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
|
||||||
nodes, err := minionLister.List()
|
nodes, err := minionLister.List()
|
||||||
result := []HostPriority{}
|
result := []HostPriority{}
|
||||||
|
|
||||||
@@ -60,7 +60,7 @@ func numericPriority(pod api.Pod, podLister PodLister, minionLister MinionLister
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func reverseNumericPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
|
func reverseNumericPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
|
||||||
var maxScore float64
|
var maxScore float64
|
||||||
minScore := math.MaxFloat64
|
minScore := math.MaxFloat64
|
||||||
reverseResult := []HostPriority{}
|
reverseResult := []HostPriority{}
|
||||||
@@ -165,7 +165,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates map[string]FitPredicate
|
predicates map[string]FitPredicate
|
||||||
prioritizers []PriorityConfig
|
prioritizers []PriorityConfig
|
||||||
nodes []string
|
nodes []string
|
||||||
pod api.Pod
|
pod *api.Pod
|
||||||
expectedHost string
|
expectedHost string
|
||||||
expectsErr bool
|
expectsErr bool
|
||||||
}{
|
}{
|
||||||
@@ -189,7 +189,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates: map[string]FitPredicate{"matches": matchesPredicate},
|
predicates: map[string]FitPredicate{"matches": matchesPredicate},
|
||||||
prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}},
|
prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
pod: api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}},
|
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}},
|
||||||
expectedHost: "machine2",
|
expectedHost: "machine2",
|
||||||
name: "test 3",
|
name: "test 3",
|
||||||
},
|
},
|
||||||
@@ -204,7 +204,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates: map[string]FitPredicate{"matches": matchesPredicate},
|
predicates: map[string]FitPredicate{"matches": matchesPredicate},
|
||||||
prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}},
|
prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}},
|
||||||
nodes: []string{"3", "2", "1"},
|
nodes: []string{"3", "2", "1"},
|
||||||
pod: api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
||||||
expectedHost: "2",
|
expectedHost: "2",
|
||||||
name: "test 5",
|
name: "test 5",
|
||||||
},
|
},
|
||||||
@@ -212,7 +212,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates: map[string]FitPredicate{"true": truePredicate},
|
predicates: map[string]FitPredicate{"true": truePredicate},
|
||||||
prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
|
prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
|
||||||
nodes: []string{"3", "2", "1"},
|
nodes: []string{"3", "2", "1"},
|
||||||
pod: api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
||||||
expectedHost: "1",
|
expectedHost: "1",
|
||||||
name: "test 6",
|
name: "test 6",
|
||||||
},
|
},
|
||||||
@@ -227,7 +227,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
random := rand.New(rand.NewSource(0))
|
random := rand.New(rand.NewSource(0))
|
||||||
scheduler := NewGenericScheduler(test.predicates, test.prioritizers, FakePodLister([]api.Pod{}), random)
|
scheduler := NewGenericScheduler(test.predicates, test.prioritizers, FakePodLister([]*api.Pod{}), random)
|
||||||
machine, err := scheduler.Schedule(test.pod, FakeMinionLister(makeNodeList(test.nodes)))
|
machine, err := scheduler.Schedule(test.pod, FakeMinionLister(makeNodeList(test.nodes)))
|
||||||
if test.expectsErr {
|
if test.expectsErr {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -247,7 +247,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
func TestFindFitAllError(t *testing.T) {
|
func TestFindFitAllError(t *testing.T) {
|
||||||
nodes := []string{"3", "2", "1"}
|
nodes := []string{"3", "2", "1"}
|
||||||
predicates := map[string]FitPredicate{"true": truePredicate, "false": falsePredicate}
|
predicates := map[string]FitPredicate{"true": truePredicate, "false": falsePredicate}
|
||||||
_, predicateMap, err := findNodesThatFit(api.Pod{}, FakePodLister([]api.Pod{}), predicates, makeNodeList(nodes))
|
_, predicateMap, err := findNodesThatFit(&api.Pod{}, FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
@@ -271,8 +271,8 @@ func TestFindFitAllError(t *testing.T) {
|
|||||||
func TestFindFitSomeError(t *testing.T) {
|
func TestFindFitSomeError(t *testing.T) {
|
||||||
nodes := []string{"3", "2", "1"}
|
nodes := []string{"3", "2", "1"}
|
||||||
predicates := map[string]FitPredicate{"true": truePredicate, "match": matchesPredicate}
|
predicates := map[string]FitPredicate{"true": truePredicate, "match": matchesPredicate}
|
||||||
pod := api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}
|
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}
|
||||||
_, predicateMap, err := findNodesThatFit(pod, FakePodLister([]api.Pod{}), predicates, makeNodeList(nodes))
|
_, predicateMap, err := findNodesThatFit(pod, FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
|
@@ -39,14 +39,14 @@ func (f FakeMinionLister) List() (api.NodeList, error) {
// PodLister interface represents anything that can list pods for a scheduler.
type PodLister interface {
// TODO: make this exactly the same as client's Pods(ns).List() method, by returning a api.PodList
List(labels.Selector) ([]api.Pod, error)
List(labels.Selector) ([]*api.Pod, error)
}

// FakePodLister implements PodLister on an []api.Pods for test purposes.
type FakePodLister []api.Pod
type FakePodLister []*api.Pod

// List returns []api.Pod matching a query.
func (f FakePodLister) List(s labels.Selector) (selected []api.Pod, err error) {
func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error) {
for _, pod := range f {
if s.Matches(labels.Set(pod.Labels)) {
selected = append(selected, pod)
@@ -60,7 +60,7 @@ type ServiceLister interface {
// Lists all the services
List() (api.ServiceList, error)
// Gets the services for the given pod
GetPodServices(api.Pod) ([]api.Service, error)
GetPodServices(*api.Pod) ([]api.Service, error)
}

// FakeServiceLister implements ServiceLister on []api.Service for test purposes.
@@ -72,7 +72,7 @@ func (f FakeServiceLister) List() (api.ServiceList, error) {
}

// GetPodServices gets the services that have the selector that match the labels on the given pod
func (f FakeServiceLister) GetPodServices(pod api.Pod) (services []api.Service, err error) {
func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) {
var selector labels.Selector

for _, service := range f {
@@ -80,11 +80,11 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
// are exclusive so if there is already a volume mounted on that node, another pod can't schedule
// there. This is GCE specific for now.
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
func NoDiskConflict(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
manifest := &(pod.Spec)
for ix := range manifest.Volumes {
for podIx := range existingPods {
if isVolumeConflict(manifest.Volumes[ix], &existingPods[podIx]) {
if isVolumeConflict(manifest.Volumes[ix], existingPods[podIx]) {
return false, nil
}
}
@@ -111,31 +111,31 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
return result
}

func CheckPodsExceedingCapacity(pods []api.Pod, capacity api.ResourceList) (fitting []api.Pod, notFitting []api.Pod) {
func CheckPodsExceedingCapacity(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFitting []*api.Pod) {
totalMilliCPU := capacity.Cpu().MilliValue()
totalMemory := capacity.Memory().Value()
milliCPURequested := int64(0)
memoryRequested := int64(0)
for ix := range pods {
for _, pod := range pods {
podRequest := getResourceRequest(&pods[ix])
podRequest := getResourceRequest(pod)
fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
if !fitsCPU || !fitsMemory {
// the pod doesn't fit
notFitting = append(notFitting, pods[ix])
notFitting = append(notFitting, pod)
continue
}
// the pod fits
milliCPURequested += podRequest.milliCPU
memoryRequested += podRequest.memory
fitting = append(fitting, pods[ix])
fitting = append(fitting, pod)
}
return
}

// PodFitsResources calculates fit based on requested, rather than used resources
func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
podRequest := getResourceRequest(&pod)
podRequest := getResourceRequest(pod)
if podRequest.milliCPU == 0 && podRequest.memory == 0 {
// no resources requested always fits.
return true, nil
@@ -144,7 +144,7 @@ func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node
if err != nil {
return false, err
}
pods := []api.Pod{}
pods := []*api.Pod{}
copy(pods, existingPods)
pods = append(existingPods, pod)
_, exceeding := CheckPodsExceedingCapacity(pods, info.Status.Capacity)
@@ -180,15 +180,15 @@ type NodeSelector struct {
info NodeInfo
}

func (n *NodeSelector) PodSelectorMatches(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
minion, err := n.info.GetNodeInfo(node)
if err != nil {
return false, err
}
return PodMatchesNodeLabels(&pod, minion), nil
return PodMatchesNodeLabels(pod, minion), nil
}

func PodFitsHost(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
func PodFitsHost(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
if len(pod.Spec.Host) == 0 {
return true, nil
}
@@ -222,7 +222,7 @@ func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) FitPre
// Alternately, eliminating minions that have a certain label, regardless of value, is also useful
// A minion may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this minion
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
var exists bool
minion, err := n.info.GetNodeInfo(node)
if err != nil {
@@ -264,7 +264,7 @@ func NewServiceAffinityPredicate(podLister PodLister, serviceLister ServiceListe
// - L is listed in the ServiceAffinity object that is passed into the function
// - the pod does not have any NodeSelector for L
// - some other pod from the same service is already scheduled onto a minion that has value V for label L
func (s *ServiceAffinity) CheckServiceAffinity(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
var affinitySelector labels.Selector

// check if the pod being scheduled has the affinity labels specified in its NodeSelector
@@ -292,7 +292,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod api.Pod, existingPods []api.P
return false, err
}
// consider only the pods that belong to the same namespace
nsServicePods := []api.Pod{}
nsServicePods := []*api.Pod{}
for _, nsPod := range servicePods {
if nsPod.Namespace == pod.Namespace {
nsServicePods = append(nsServicePods, nsPod)
@@ -333,7 +333,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod api.Pod, existingPods []api.P
return affinitySelector.Matches(labels.Set(minion.Labels)), nil
}

func PodFitsPorts(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
func PodFitsPorts(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
existingPorts := getUsedPorts(existingPods...)
wantPorts := getUsedPorts(pod)
for wport := range wantPorts {
@@ -347,7 +347,7 @@ func PodFitsPorts(pod api.Pod, existingPods []api.Pod, node string) (bool, error
return true, nil
}

func getUsedPorts(pods ...api.Pod) map[int]bool {
func getUsedPorts(pods ...*api.Pod) map[int]bool {
ports := make(map[int]bool)
for _, pod := range pods {
for _, container := range pod.Spec.Containers {
@@ -361,12 +361,12 @@ func getUsedPorts(pods ...api.Pod) map[int]bool {

// MapPodsToMachines obtains a list of pods and pivots that list into a map where the keys are host names
// and the values are the list of pods running on that host.
func MapPodsToMachines(lister PodLister) (map[string][]api.Pod, error) {
func MapPodsToMachines(lister PodLister) (map[string][]*api.Pod, error) {
machineToPods := map[string][]api.Pod{}
machineToPods := map[string][]*api.Pod{}
// TODO: perform more targeted query...
pods, err := lister.List(labels.Everything())
if err != nil {
return map[string][]api.Pod{}, err
return map[string][]*api.Pod{}, err
}
for _, scheduledPod := range pods {
host := scheduledPod.Spec.Host
@@ -52,7 +52,7 @@ func makeResources(milliCPU int64, memory int64) api.NodeResources {
}
}

func newResourcePod(usage ...resourceRequest) api.Pod {
func newResourcePod(usage ...resourceRequest) *api.Pod {
containers := []api.Container{}
for _, req := range usage {
containers = append(containers, api.Container{
@@ -64,7 +64,7 @@ func newResourcePod(usage ...resourceRequest) api.Pod {
},
})
}
return api.Pod{
return &api.Pod{
Spec: api.PodSpec{
Containers: containers,
},
@@ -73,14 +73,14 @@ func newResourcePod(usage ...resourceRequest) api.Pod {

func TestPodFitsResources(t *testing.T) {
tests := []struct {
pod api.Pod
pod *api.Pod
existingPods []api.Pod
existingPods []*api.Pod
fits bool
test string
}{
{
pod: api.Pod{},
pod: &api.Pod{},
existingPods: []api.Pod{
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
},
fits: true,
@@ -88,7 +88,7 @@ func TestPodFitsResources(t *testing.T) {
},
{
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
existingPods: []api.Pod{
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
},
fits: false,
@@ -96,7 +96,7 @@ func TestPodFitsResources(t *testing.T) {
},
{
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
existingPods: []api.Pod{
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 5, memory: 5}),
},
fits: true,
@@ -104,7 +104,7 @@ func TestPodFitsResources(t *testing.T) {
},
{
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 2}),
existingPods: []api.Pod{
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
},
fits: false,
@@ -112,7 +112,7 @@ func TestPodFitsResources(t *testing.T) {
},
{
pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
existingPods: []api.Pod{
existingPods: []*api.Pod{
newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
},
fits: true,
@@ -135,19 +135,19 @@ func TestPodFitsResources(t *testing.T) {

func TestPodFitsHost(t *testing.T) {
tests := []struct {
pod api.Pod
pod *api.Pod
node string
fits bool
test string
}{
{
pod: api.Pod{},
pod: &api.Pod{},
node: "foo",
fits: true,
test: "no host specified",
},
{
pod: api.Pod{
pod: &api.Pod{
Spec: api.PodSpec{
Host: "foo",
},
@@ -157,7 +157,7 @@ func TestPodFitsHost(t *testing.T) {
test: "host matches",
},
{
pod: api.Pod{
pod: &api.Pod{
Spec: api.PodSpec{
Host: "bar",
},
@@ -169,7 +169,7 @@ func TestPodFitsHost(t *testing.T) {
}

for _, test := range tests {
result, err := PodFitsHost(test.pod, []api.Pod{}, test.node)
result, err := PodFitsHost(test.pod, []*api.Pod{}, test.node)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -181,20 +181,20 @@ func TestPodFitsHost(t *testing.T) {

func TestPodFitsPorts(t *testing.T) {
tests := []struct {
pod api.Pod
pod *api.Pod
existingPods []api.Pod
existingPods []*api.Pod
fits bool
test string
}{
{
pod: api.Pod{},
pod: &api.Pod{},
existingPods: []api.Pod{},
existingPods: []*api.Pod{},
fits: true,
test: "nothing running",
},
{
pod: newPod("m1", 8080),
existingPods: []api.Pod{
existingPods: []*api.Pod{
newPod("m1", 9090),
},
fits: true,
@@ -202,7 +202,7 @@ func TestPodFitsPorts(t *testing.T) {
},
{
pod: newPod("m1", 8080),
existingPods: []api.Pod{
existingPods: []*api.Pod{
newPod("m1", 8080),
},
fits: false,
@@ -210,7 +210,7 @@ func TestPodFitsPorts(t *testing.T) {
},
{
pod: newPod("m1", 8000, 8080),
existingPods: []api.Pod{
existingPods: []*api.Pod{
newPod("m1", 8080),
},
fits: false,
@@ -218,7 +218,7 @@ func TestPodFitsPorts(t *testing.T) {
},
{
pod: newPod("m1", 8000, 8080),
existingPods: []api.Pod{
existingPods: []*api.Pod{
newPod("m1", 8001, 8080),
},
fits: false,
@@ -238,25 +238,25 @@ func TestPodFitsPorts(t *testing.T) {

func TestGetUsedPorts(t *testing.T) {
tests := []struct {
pods []api.Pod
pods []*api.Pod

ports map[int]bool
}{
{
[]api.Pod{
[]*api.Pod{
newPod("m1", 9090),
},
map[int]bool{9090: true},
},
{
[]api.Pod{
[]*api.Pod{
newPod("m1", 9090),
newPod("m1", 9091),
},
map[int]bool{9090: true, 9091: true},
},
{
[]api.Pod{
[]*api.Pod{
newPod("m1", 9090),
newPod("m2", 9091),
},
@@ -296,15 +296,15 @@ func TestDiskConflicts(t *testing.T) {
},
}
tests := []struct {
pod api.Pod
pod *api.Pod
existingPods []api.Pod
existingPods []*api.Pod
isOk bool
test string
}{
{api.Pod{}, []api.Pod{}, true, "nothing"},
{&api.Pod{}, []*api.Pod{}, true, "nothing"},
{api.Pod{}, []api.Pod{{Spec: volState}}, true, "one state"},
{&api.Pod{}, []*api.Pod{{Spec: volState}}, true, "one state"},
{api.Pod{Spec: volState}, []api.Pod{{Spec: volState}}, false, "same state"},
{&api.Pod{Spec: volState}, []*api.Pod{{Spec: volState}}, false, "same state"},
{api.Pod{Spec: volState2}, []api.Pod{{Spec: volState}}, true, "different state"},
{&api.Pod{Spec: volState2}, []*api.Pod{{Spec: volState}}, true, "different state"},
}

for _, test := range tests {
@@ -345,15 +345,15 @@ func TestAWSDiskConflicts(t *testing.T) {
},
}
tests := []struct {
pod api.Pod
pod *api.Pod
existingPods []api.Pod
existingPods []*api.Pod
isOk bool
test string
}{
{api.Pod{}, []api.Pod{}, true, "nothing"},
{&api.Pod{}, []*api.Pod{}, true, "nothing"},
{api.Pod{}, []api.Pod{{Spec: volState}}, true, "one state"},
{&api.Pod{}, []*api.Pod{{Spec: volState}}, true, "one state"},
{api.Pod{Spec: volState}, []api.Pod{{Spec: volState}}, false, "same state"},
{&api.Pod{Spec: volState}, []*api.Pod{{Spec: volState}}, false, "same state"},
{api.Pod{Spec: volState2}, []api.Pod{{Spec: volState}}, true, "different state"},
{&api.Pod{Spec: volState2}, []*api.Pod{{Spec: volState}}, true, "different state"},
}

for _, test := range tests {
@@ -372,18 +372,18 @@ func TestAWSDiskConflicts(t *testing.T) {

func TestPodFitsSelector(t *testing.T) {
tests := []struct {
pod api.Pod
pod *api.Pod
labels map[string]string
fits bool
test string
}{
{
pod: api.Pod{},
pod: &api.Pod{},
fits: true,
test: "no selector",
},
{
pod: api.Pod{
pod: &api.Pod{
Spec: api.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
@@ -394,7 +394,7 @@ func TestPodFitsSelector(t *testing.T) {
test: "missing labels",
},
{
pod: api.Pod{
pod: &api.Pod{
Spec: api.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
@@ -408,7 +408,7 @@ func TestPodFitsSelector(t *testing.T) {
test: "same labels",
},
{
pod: api.Pod{
pod: &api.Pod{
Spec: api.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
@@ -423,7 +423,7 @@ func TestPodFitsSelector(t *testing.T) {
test: "node labels are superset",
},
{
pod: api.Pod{
pod: &api.Pod{
Spec: api.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
@@ -442,7 +442,7 @@ func TestPodFitsSelector(t *testing.T) {
node := api.Node{ObjectMeta: api.ObjectMeta{Labels: test.labels}}

fit := NodeSelector{FakeNodeInfo(node)}
fits, err := fit.PodSelectorMatches(test.pod, []api.Pod{}, "machine")
fits, err := fit.PodSelectorMatches(test.pod, []*api.Pod{}, "machine")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -455,8 +455,8 @@ func TestPodFitsSelector(t *testing.T) {
func TestNodeLabelPresence(t *testing.T) {
label := map[string]string{"foo": "bar", "bar": "foo"}
tests := []struct {
pod api.Pod
pod *api.Pod
existingPods []api.Pod
existingPods []*api.Pod
labels []string
presence bool
fits bool
@@ -536,8 +536,8 @@ func TestServiceAffinity(t *testing.T) {
node4 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labels4}}
node5 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labels4}}
tests := []struct {
pod api.Pod
pod *api.Pod
pods []api.Pod
pods []*api.Pod
services []api.Service
node string
labels []string
@@ -545,28 +545,29 @@ func TestServiceAffinity(t *testing.T) {
test string
}{
{
pod: new(api.Pod),
node: "machine1",
fits: true,
labels: []string{"region"},
test: "nothing scheduled",
},
{
pod: api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}},
pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}},
node: "machine1",
fits: true,
labels: []string{"region"},
test: "pod with region label match",
},
{
pod: api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}},
pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}},
node: "machine1",
fits: false,
labels: []string{"region"},
test: "pod with region label mismatch",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []api.Pod{{Spec: api.PodSpec{Host: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
@@ -574,8 +575,8 @@ func TestServiceAffinity(t *testing.T) {
test: "service pod on same minion",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
@@ -583,8 +584,8 @@ func TestServiceAffinity(t *testing.T) {
test: "service pod on different minion, region match",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: false,
@@ -592,8 +593,8 @@ func TestServiceAffinity(t *testing.T) {
test: "service pod on different minion, region mismatch",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns2"}}},
fits: true,
@@ -601,8 +602,8 @@ func TestServiceAffinity(t *testing.T) {
test: "service in different namespace, region mismatch",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}},
pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
fits: true,
@@ -610,8 +611,8 @@ func TestServiceAffinity(t *testing.T) {
test: "pod in different namespace, region mismatch",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
fits: false,
@@ -619,8 +620,8 @@ func TestServiceAffinity(t *testing.T) {
test: "service and pod in same namespace, region mismatch",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: false,
@@ -628,8 +629,8 @@ func TestServiceAffinity(t *testing.T) {
test: "service pod on different minion, multiple labels, not all match",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []api.Pod{{Spec: api.PodSpec{Host: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine4",
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
@@ -641,7 +642,7 @@ func TestServiceAffinity(t *testing.T) {
for _, test := range tests {
nodes := []api.Node{node1, node2, node3, node4, node5}
serviceAffinity := ServiceAffinity{FakePodLister(test.pods), FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []api.Pod{}, test.node)
fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []*api.Pod{}, test.node)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -39,7 +39,7 @@ func calculateScore(requested, capacity int64, node string) int {

// Calculate the occupancy on a node. 'node' has information about the resources on the node.
// 'pods' is a list of pods currently scheduled on the node.
func calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority {
func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriority {
totalMilliCPU := int64(0)
totalMemory := int64(0)
for _, existingPod := range pods {
@@ -78,7 +78,7 @@ func calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the minimum of the average of the fraction of requested to capacity.
// Details: (Sum(requested cpu) / Capacity + Sum(requested memory) / Capacity) * 50
func LeastRequestedPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func LeastRequestedPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
nodes, err := minionLister.List()
if err != nil {
return HostPriorityList{}, err
@@ -108,7 +108,7 @@ func NewNodeLabelPriority(label string, presence bool) PriorityFunction {
// CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.
// If presence is true, prioritizes minions that have the specified label, regardless of value.
// If presence is false, prioritizes minions that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
var score int
minions, err := minionLister.List()
if err != nil {
@@ -141,7 +141,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod api.Pod, podLister
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
func BalancedResourceAllocation(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func BalancedResourceAllocation(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
nodes, err := minionLister.List()
if err != nil {
return HostPriorityList{}, err
@@ -155,7 +155,7 @@ func BalancedResourceAllocation(pod api.Pod, podLister PodLister, minionLister M
return list, nil
}

func calculateBalancedResourceAllocation(pod api.Pod, node api.Node, pods []api.Pod) HostPriority {
func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriority {
totalMilliCPU := int64(0)
totalMemory := int64(0)
score := int(0)
@@ -98,8 +98,8 @@ func TestLeastRequested(t *testing.T) {
},
}
tests := []struct {
pod api.Pod
pod *api.Pod
pods []api.Pod
pods []*api.Pod
nodes []api.Node
expectedList HostPriorityList
test string
@@ -116,7 +116,7 @@ func TestLeastRequested(t *testing.T) {
Memory Score: ((10000 - 0) *10) / 10000 = 10
Minion2 Score: (10 + 10) / 2 = 10
*/
pod: api.Pod{Spec: noResources},
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "nothing scheduled, nothing requested",
@@ -133,7 +133,7 @@ func TestLeastRequested(t *testing.T) {
Memory Score: ((10000 - 5000) *10) / 10000 = 5
Minion2 Score: (5 + 5) / 2 = 5
*/
pod: api.Pod{Spec: cpuAndMemory},
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
expectedList: []HostPriority{{"machine1", 3}, {"machine2", 5}},
test: "nothing scheduled, resources requested, differently sized machines",
@@ -150,11 +150,11 @@ func TestLeastRequested(t *testing.T) {
Memory Score: ((10000 - 0) *10) / 10000 = 10
Minion2 Score: (10 + 10) / 2 = 10
*/
pod: api.Pod{Spec: noResources},
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "no resources requested, pods scheduled",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -173,11 +173,11 @@ func TestLeastRequested(t *testing.T) {
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
Minion2 Score: (4 + 7.5) / 2 = 5
*/
pod: api.Pod{Spec: noResources},
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
expectedList: []HostPriority{{"machine1", 7}, {"machine2", 5}},
test: "no resources requested, pods scheduled with resources",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -196,11 +196,11 @@ func TestLeastRequested(t *testing.T) {
Memory Score: ((20000 - 10000) *10) / 20000 = 5
Minion2 Score: (4 + 5) / 2 = 4
*/
pod: api.Pod{Spec: cpuAndMemory},
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
expectedList: []HostPriority{{"machine1", 5}, {"machine2", 4}},
test: "resources requested, pods scheduled with resources",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -217,11 +217,11 @@ func TestLeastRequested(t *testing.T) {
Memory Score: ((50000 - 10000) *10) / 50000 = 8
Minion2 Score: (4 + 8) / 2 = 6
*/
pod: api.Pod{Spec: cpuAndMemory},
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
expectedList: []HostPriority{{"machine1", 5}, {"machine2", 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -238,21 +238,21 @@ func TestLeastRequested(t *testing.T) {
Memory Score: ((10000 - 5000) *10) / 10000 = 5
Minion2 Score: (0 + 5) / 2 = 2
*/
pod: api.Pod{Spec: cpuOnly},
pod: &api.Pod{Spec: cpuOnly},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 5}, {"machine2", 2}},
test: "requested resources exceed minion capacity",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
pod: api.Pod{Spec: noResources},
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "zero minion resources, pods scheduled with resources",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -275,8 +275,6 @@ func TestNewNodeLabelPriority(t *testing.T) {
label2 := map[string]string{"bar": "foo"}
label3 := map[string]string{"bar": "baz"}
tests := []struct {
pod api.Pod
pods []api.Pod
nodes []api.Node
label string
presence bool
@@ -356,7 +354,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
label: test.label,
presence: test.presence,
}
list, err := prioritizer.CalculateNodeLabelPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes}))
list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, FakeMinionLister(api.NodeList{Items: test.nodes}))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -430,8 +428,8 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
}
tests := []struct {
pod api.Pod
pod *api.Pod
pods []api.Pod
pods []*api.Pod
nodes []api.Node
expectedList HostPriorityList
test string
@@ -448,7 +446,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 0 / 10000 = 0%
Minion2 Score: 10 - (0-0)*10 = 10
*/
pod: api.Pod{Spec: noResources},
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "nothing scheduled, nothing requested",
@@ -465,7 +463,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 5000/10000 = 50%
Minion2 Score: 10 - (0.5-0.5)*10 = 10
*/
pod: api.Pod{Spec: cpuAndMemory},
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
expectedList: []HostPriority{{"machine1", 7}, {"machine2", 10}},
test: "nothing scheduled, resources requested, differently sized machines",
@@ -482,11 +480,11 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 0 / 10000 = 0%
Minion2 Score: 10 - (0-0)*10 = 10
*/
pod: api.Pod{Spec: noResources},
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "no resources requested, pods scheduled",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -505,11 +503,11 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 5000 / 20000 = 25%
Minion2 Score: 10 - (0.6-0.25)*10 = 6
*/
pod: api.Pod{Spec: noResources},
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
expectedList: []HostPriority{{"machine1", 4}, {"machine2", 6}},
test: "no resources requested, pods scheduled with resources",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -528,11 +526,11 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 10000 / 20000 = 50%
Minion2 Score: 10 - (0.6-0.5)*10 = 9
*/
pod: api.Pod{Spec: cpuAndMemory},
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
expectedList: []HostPriority{{"machine1", 6}, {"machine2", 9}},
test: "resources requested, pods scheduled with resources",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -549,11 +547,11 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 10000 / 50000 = 20%
Minion2 Score: 10 - (0.6-0.2)*10 = 6
*/
pod: api.Pod{Spec: cpuAndMemory},
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
expectedList: []HostPriority{{"machine1", 6}, {"machine2", 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -570,21 +568,21 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction 5000 / 10000 = 50%
Minion2 Score: 0
*/
pod: api.Pod{Spec: cpuOnly},
pod: &api.Pod{Spec: cpuOnly},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "requested resources exceed minion capacity",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
pod: api.Pod{Spec: noResources},
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "zero minion resources, pods scheduled with resources",
pods: []api.Pod{
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -23,5 +23,5 @@ import (
// Scheduler is an interface implemented by things that know how to schedule pods
// onto machines.
type Scheduler interface {
Schedule(api.Pod, MinionLister) (selectedMachine string, err error)
Schedule(*api.Pod, MinionLister) (selectedMachine string, err error)
}
@@ -31,7 +31,7 @@ type schedulerTester struct {
}

// Call if you know exactly where pod should get scheduled.
func (st *schedulerTester) expectSchedule(pod api.Pod, expected string) {
func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {
actual, err := st.scheduler.Schedule(pod, st.minionLister)
if err != nil {
st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod)
@@ -43,7 +43,7 @@ func (st *schedulerTester) expectSchedule(pod api.Pod, expected string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Call if you can't predict where pod will be scheduled.
|
// Call if you can't predict where pod will be scheduled.
|
||||||
func (st *schedulerTester) expectSuccess(pod api.Pod) {
|
func (st *schedulerTester) expectSuccess(pod *api.Pod) {
|
||||||
_, err := st.scheduler.Schedule(pod, st.minionLister)
|
_, err := st.scheduler.Schedule(pod, st.minionLister)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod)
|
st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod)
|
||||||
@@ -52,19 +52,19 @@ func (st *schedulerTester) expectSuccess(pod api.Pod) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Call if pod should *not* schedule.
|
// Call if pod should *not* schedule.
|
||||||
func (st *schedulerTester) expectFailure(pod api.Pod) {
|
func (st *schedulerTester) expectFailure(pod *api.Pod) {
|
||||||
_, err := st.scheduler.Schedule(pod, st.minionLister)
|
_, err := st.scheduler.Schedule(pod, st.minionLister)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
st.t.Error("Unexpected non-error")
|
st.t.Error("Unexpected non-error")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newPod(host string, hostPorts ...int) api.Pod {
|
func newPod(host string, hostPorts ...int) *api.Pod {
|
||||||
networkPorts := []api.ContainerPort{}
|
networkPorts := []api.ContainerPort{}
|
||||||
for _, port := range hostPorts {
|
for _, port := range hostPorts {
|
||||||
networkPorts = append(networkPorts, api.ContainerPort{HostPort: port})
|
networkPorts = append(networkPorts, api.ContainerPort{HostPort: port})
|
||||||
}
|
}
|
||||||
return api.Pod{
|
return &api.Pod{
|
||||||
Spec: api.PodSpec{
|
Spec: api.PodSpec{
|
||||||
Host: host,
|
Host: host,
|
||||||
Containers: []api.Container{
|
Containers: []api.Container{
|
||||||
|
@@ -34,9 +34,9 @@ func NewServiceSpreadPriority(serviceLister ServiceLister) PriorityFunction {
 
 // CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
 // on the same machine.
-func (s *ServiceSpread) CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
 var maxCount int
-var nsServicePods []api.Pod
+var nsServicePods []*api.Pod
 
 services, err := s.serviceLister.GetPodServices(pod)
 if err == nil {
@@ -101,8 +101,8 @@ func NewServiceAntiAffinityPriority(serviceLister ServiceLister, label string) P
 // CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
 // on machines with the same value for a particular label.
 // The label to be considered is provided to the struct (ServiceAntiAffinity).
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
-var nsServicePods []api.Pod
+var nsServicePods []*api.Pod
 
 services, err := s.serviceLister.GetPodServices(pod)
 if err == nil {
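For context, both priority functions above accumulate the pods that belong to the same service into a slice, and that slice is now a slice of pointers. A rough sketch of the per-machine counting pattern over such a slice (simplified stand-in types; not the actual CalculateSpreadPriority code):

```go
package main

import "fmt"

// Pod is a simplified stand-in used only to illustrate counting service pods
// per machine over a []*Pod slice.
type Pod struct {
	Host   string
	Labels map[string]string
}

// countByHost tallies how many of the given pods run on each host; with a
// slice of pointers, no Pod struct is copied while iterating.
func countByHost(pods []*Pod) map[string]int {
	counts := map[string]int{}
	for _, pod := range pods {
		counts[pod.Host]++
	}
	return counts
}

func main() {
	nsServicePods := []*Pod{
		{Host: "machine1"},
		{Host: "machine1"},
		{Host: "machine2"},
	}
	fmt.Println(countByHost(nsServicePods)) // map[machine1:2 machine2:1]
}
```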
@@ -40,36 +40,37 @@ func TestServiceSpreadPriority(t *testing.T) {
 Host: "machine2",
 }
 tests := []struct {
-pod api.Pod
+pod *api.Pod
-pods []api.Pod
+pods []*api.Pod
 nodes []string
 services []api.Service
 expectedList HostPriorityList
 test string
 }{
 {
+pod: new(api.Pod),
 nodes: []string{"machine1", "machine2"},
 expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "nothing scheduled",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{{Spec: zone1Spec}},
+pods: []*api.Pod{{Spec: zone1Spec}},
 nodes: []string{"machine1", "machine2"},
 expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "no services",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
+pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
 nodes: []string{"machine1", "machine2"},
 services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
 expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "different services",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 },
@@ -79,8 +80,8 @@ func TestServiceSpreadPriority(t *testing.T) {
 test: "two pods, one service pod",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
@@ -93,8 +94,8 @@ func TestServiceSpreadPriority(t *testing.T) {
 test: "five pods, one service pod in no namespace",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
@@ -106,8 +107,8 @@ func TestServiceSpreadPriority(t *testing.T) {
 test: "four pods, one service pod in default namespace",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns2"}},
@@ -120,8 +121,8 @@ func TestServiceSpreadPriority(t *testing.T) {
 test: "five pods, one service pod in specific namespace",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -132,8 +133,8 @@ func TestServiceSpreadPriority(t *testing.T) {
 test: "three pods, two service pods on different machines",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -145,8 +146,8 @@ func TestServiceSpreadPriority(t *testing.T) {
 test: "four pods, three service pods",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -203,14 +204,15 @@ func TestZoneSpreadPriority(t *testing.T) {
 "machine21": zone2, "machine22": zone2,
 }
 tests := []struct {
-pod api.Pod
+pod *api.Pod
-pods []api.Pod
+pods []*api.Pod
 nodes map[string]map[string]string
 services []api.Service
 expectedList HostPriorityList
 test string
 }{
 {
+pod: new(api.Pod),
 nodes: labeledNodes,
 expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
 {"machine21", 10}, {"machine22", 10},
@@ -218,8 +220,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 test: "nothing scheduled",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{{Spec: zone1Spec}},
+pods: []*api.Pod{{Spec: zone1Spec}},
 nodes: labeledNodes,
 expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
 {"machine21", 10}, {"machine22", 10},
@@ -227,8 +229,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 test: "no services",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
+pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
 nodes: labeledNodes,
 services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
 expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
@@ -237,8 +239,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 test: "different services",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -251,8 +253,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 test: "three pods, one service pod",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -265,8 +267,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 test: "three pods, two service pods on different machines",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -280,8 +282,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 test: "three service label match pods in different namespaces",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -295,8 +297,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 test: "four pods, three service pods",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -309,8 +311,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 test: "service with partial pod label matches",
 },
 {
-pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
-pods: []api.Pod{
+pods: []*api.Pod{
 {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -21,7 +21,7 @@ import (
 )
 
 // FitPredicate is a function that indicates if a pod fits into an existing node.
-type FitPredicate func(pod api.Pod, existingPods []api.Pod, node string) (bool, error)
+type FitPredicate func(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error)
 
 // HostPriority represents the priority of scheduling to a particular host, lower priority is better.
 type HostPriority struct {
@@ -46,7 +46,7 @@ func (h HostPriorityList) Swap(i, j int) {
 h[i], h[j] = h[j], h[i]
 }
 
-type PriorityFunction func(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error)
+type PriorityFunction func(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error)
 
 type PriorityConfig struct {
 Function PriorityFunction
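A hedged example of what a predicate looks like against the new pointer-based FitPredicate shape; the noConflictingName predicate and the simplified Pod type below are illustrative only, not part of this PR:

```go
package main

import "fmt"

// Pod stands in for api.Pod; only the fields this sketch needs.
type Pod struct {
	Name string
	Host string
}

// fitPredicate mirrors the updated FitPredicate signature: both the candidate
// pod and the already-scheduled pods are passed by pointer, so evaluating a
// predicate against every node copies no Pod structs.
type fitPredicate func(pod *Pod, existingPods []*Pod, node string) (bool, error)

// noConflictingName is a hypothetical predicate that rejects a node already
// running a pod with the same name.
func noConflictingName(pod *Pod, existingPods []*Pod, node string) (bool, error) {
	for _, existing := range existingPods {
		if existing.Name == pod.Name {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	var fits fitPredicate = noConflictingName
	ok, err := fits(&Pod{Name: "web"}, []*Pod{{Name: "db"}}, "machine1")
	fmt.Println(ok, err) // true <nil>
}
```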
@@ -115,19 +115,19 @@ func TestCreateFromEmptyConfig(t *testing.T) {
 factory.CreateFromConfig(policy)
 }
 
-func PredicateOne(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
+func PredicateOne(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
 return true, nil
 }
 
-func PredicateTwo(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
+func PredicateTwo(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
 return true, nil
 }
 
-func PriorityOne(pod api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func PriorityOne(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
 return []algorithm.HostPriority{}, nil
 }
 
-func PriorityTwo(pod api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func PriorityTwo(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
 return []algorithm.HostPriority{}, nil
 }
@@ -123,7 +123,7 @@ func (s *SimpleModeler) ForgetPodByKey(key string) {
 }
 
 // Extract names for readable logging.
-func podNames(pods []api.Pod) []string {
+func podNames(pods []*api.Pod) []string {
 out := make([]string, len(pods))
 for i := range pods {
 out[i] = fmt.Sprintf("'%v/%v (%v)'", pods[i].Namespace, pods[i].Name, pods[i].UID)
@@ -131,7 +131,7 @@ func podNames(pods []api.Pod) []string {
 return out
 }
 
-func (s *SimpleModeler) listPods(selector labels.Selector) (pods []api.Pod, err error) {
+func (s *SimpleModeler) listPods(selector labels.Selector) (pods []*api.Pod, err error) {
 assumed, err := s.assumedPods.List(selector)
 if err != nil {
 return nil, err
@@ -140,20 +140,20 @@ func (s *SimpleModeler) listPods(selector labels.Selector) (pods []api.Pod, err
 // Goal here is to stop making assumptions about a pod once it shows
 // up in one of these other lists.
 for _, pod := range assumed {
-qExist, err := s.queuedPods.Exists(&pod)
+qExist, err := s.queuedPods.Exists(pod)
 if err != nil {
 return nil, err
 }
 if qExist {
-s.assumedPods.Store.Delete(&pod)
+s.assumedPods.Store.Delete(pod)
 continue
 }
-sExist, err := s.scheduledPods.Exists(&pod)
+sExist, err := s.scheduledPods.Exists(pod)
 if err != nil {
 return nil, err
 }
 if sExist {
-s.assumedPods.Store.Delete(&pod)
+s.assumedPods.Store.Delete(pod)
 continue
 }
 }
@@ -190,6 +190,6 @@ type simpleModelerPods struct {
 }
 
 // List returns pods known and assumed to exist.
-func (s simpleModelerPods) List(selector labels.Selector) (pods []api.Pod, err error) {
+func (s simpleModelerPods) List(selector labels.Selector) (pods []*api.Pod, err error) {
 return s.simpleModeler.listPods(selector)
 }
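One detail worth calling out in the listPods hunk: with []api.Pod, the loop had to pass &pod, the address of the range variable, which is a per-iteration copy of the element (and, under pre-Go-1.22 semantics, a single reused variable); with []*api.Pod, the range variable already is the pointer stored in the slice, so it can be handed to Exists and Delete directly. A small self-contained illustration with a stand-in type:

```go
package main

import "fmt"

// Pod stands in for api.Pod.
type Pod struct{ Name string }

func main() {
	// Slice of pointers: the range variable is the same pointer that lives in
	// the slice, so lookups keyed on that pointer see the original element.
	pods := []*Pod{{Name: "a"}, {Name: "b"}}
	seen := map[*Pod]bool{}
	for _, pod := range pods {
		seen[pod] = true
	}
	fmt.Println(seen[pods[0]], seen[pods[1]]) // true true

	// Slice of values: &pod is the address of the loop variable, which holds a
	// copy of the element, never the element itself.
	values := []Pod{{Name: "a"}, {Name: "b"}}
	for _, pod := range values {
		fmt.Println(&pod == &values[0], &pod == &values[1]) // false false, on every iteration
	}
}
```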
@@ -30,15 +30,15 @@ type nn struct {
 
 type names []nn
 
-func (ids names) list() []api.Pod {
+func (ids names) list() []*api.Pod {
-out := make([]api.Pod, len(ids))
+out := make([]*api.Pod, 0, len(ids))
-for i, id := range ids {
+for _, id := range ids {
-out[i] = api.Pod{
+out = append(out, &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Namespace: id.namespace,
 Name: id.name,
 },
-}
+})
 }
 return out
 }
@@ -54,9 +54,9 @@ func (ids names) has(pod *api.Pod) bool {
 
 func TestModeler(t *testing.T) {
 table := []struct {
-queuedPods []api.Pod
+queuedPods []*api.Pod
-scheduledPods []api.Pod
+scheduledPods []*api.Pod
-assumedPods []api.Pod
+assumedPods []*api.Pod
 expectPods names
 }{
 {
@@ -79,16 +79,16 @@ func TestModeler(t *testing.T) {
 
 for _, item := range table {
 q := &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
-for i := range item.queuedPods {
+for _, pod := range item.queuedPods {
-q.Store.Add(&item.queuedPods[i])
+q.Store.Add(pod)
 }
 s := &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
-for i := range item.scheduledPods {
+for _, pod := range item.scheduledPods {
-s.Store.Add(&item.scheduledPods[i])
+s.Store.Add(pod)
 }
 m := NewSimpleModeler(q, s)
-for i := range item.assumedPods {
+for _, pod := range item.assumedPods {
-m.AssumePod(&item.assumedPods[i])
+m.AssumePod(pod)
 }
 
 list, err := m.PodLister().List(labels.Everything())
@@ -98,14 +98,14 @@ func TestModeler(t *testing.T) {
 
 found := 0
 for _, pod := range list {
-if item.expectPods.has(&pod) {
+if item.expectPods.has(pod) {
 found++
 } else {
 t.Errorf("found unexpected pod %#v", pod)
 }
 }
 if e, a := item.expectPods, found; len(e) != a {
-t.Errorf("Expected pods:\n%+v\nFound pods:\n%v\n", e, list)
+t.Errorf("Expected pods:\n%+v\nFound pods:\n%s\n", podNames(e.list()), podNames(list))
 }
 }
 }
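The list() helper above switches from filling a pre-sized []api.Pod by index to appending into a []*api.Pod allocated with zero length and full capacity; either way there is a single slice allocation, but the elements are now individually allocated Pods shared by pointer. A sketch of the same construction pattern with a stand-in type:

```go
package main

import "fmt"

// Pod stands in for api.Pod.
type Pod struct {
	Namespace string
	Name      string
}

// buildPods mirrors the updated list() shape: make([]*Pod, 0, n) plus append,
// producing pointers that can later be shared without copying the structs.
func buildPods(names []string) []*Pod {
	out := make([]*Pod, 0, len(names))
	for _, name := range names {
		out = append(out, &Pod{Namespace: "default", Name: name})
	}
	return out
}

func main() {
	pods := buildPods([]string{"a", "b"})
	alias := pods[0]
	alias.Name = "renamed" // visible through every copy of the pointer
	fmt.Println(pods[0].Name, len(pods), cap(pods)) // renamed 2 2
}
```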
@@ -112,7 +112,7 @@ func (s *Scheduler) scheduleOne() {
 defer func() {
 metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
 }()
-dest, err := s.config.Algorithm.Schedule(*pod, s.config.MinionLister)
+dest, err := s.config.Algorithm.Schedule(pod, s.config.MinionLister)
 metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))
 if err != nil {
 glog.V(1).Infof("Failed to schedule: %v", pod)
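The call-site hunk above is the other half of the Scheduler interface change: Schedule(*pod, ...) dereferenced the pointer and copied the struct on every scheduling attempt, while Schedule(pod, ...) passes a single machine word. A toy illustration of what the dereference used to cost (stand-in type, not api.Pod):

```go
package main

import "fmt"

// Pod stands in for api.Pod.
type Pod struct{ Name string }

// byValue receives a copy of the struct; mutations stay in the copy.
func byValue(p Pod) { p.Name = "changed-in-copy" }

// byPointer receives only the pointer; no struct copy is made.
func byPointer(p *Pod) {}

func main() {
	pod := &Pod{Name: "web"}
	byValue(*pod)         // the dereference copies the whole struct into the parameter
	byPointer(pod)        // only the pointer is passed
	fmt.Println(pod.Name) // web — the copy's mutation never reached the original
}
```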
@@ -59,7 +59,7 @@ type mockScheduler struct {
 err error
 }
 
-func (es mockScheduler) Schedule(pod api.Pod, ml scheduler.MinionLister) (string, error) {
+func (es mockScheduler) Schedule(pod *api.Pod, ml scheduler.MinionLister) (string, error) {
 return es.machine, es.err
 }