Controller codebase refactoring
19
pkg/controller/endpoint/doc.go
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package endpointcontroller provides the EndpointController implementation
// to manage and sync service endpoints.
package endpointcontroller
416
pkg/controller/endpoint/endpoints_controller.go
Normal file
@@ -0,0 +1,416 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// CAUTION: If you update code in this file, you may need to also update code
// in contrib/mesos/pkg/service/endpoints_controller.go
package endpointcontroller

import (
	"fmt"
	"reflect"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/endpoints"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/workqueue"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"

	"github.com/golang/glog"
)

const (
	// We'll attempt to recompute EVERY service's endpoints at least this
	// often. Higher numbers = lower CPU/network load; lower numbers =
	// shorter amount of time before a mistaken endpoint is corrected.
	FullServiceResyncPeriod = 30 * time.Second

	// We'll keep pod watches open up to this long. In the unlikely case
	// that a watch misdelivers info about a pod, it'll take this long for
	// that mistake to be rectified.
	PodRelistPeriod = 5 * time.Minute
)

var (
	keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
)

// NewEndpointController returns a new *EndpointController.
func NewEndpointController(client *client.Client) *EndpointController {
	e := &EndpointController{
		client: client,
		queue:  workqueue.New(),
	}

	e.serviceStore.Store, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Services(api.NamespaceAll).List(labels.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Services(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Service{},
		FullServiceResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: e.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueService(cur)
			},
			DeleteFunc: e.enqueueService,
		},
	)

	e.podStore.Store, e.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.addPod,
			UpdateFunc: e.updatePod,
			DeleteFunc: e.deletePod,
		},
	)

	return e
}

// EndpointController manages selector-based service endpoints.
type EndpointController struct {
	client *client.Client

	serviceStore cache.StoreToServiceLister
	podStore     cache.StoreToPodLister

	// Services that need to be updated. A channel is inappropriate here,
	// because it allows services with lots of pods to be serviced much
	// more often than services with few pods; it also would cause a
	// service that's inserted multiple times to be processed more than
	// necessary.
	queue *workqueue.Type

	// Since we join two objects, we'll watch both of them with
	// controllers.
	serviceController *framework.Controller
	podController     *framework.Controller
}

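The queue comment above is the crux of this design: workqueue.Type coalesces a key that is re-added while still queued, so a busy service is not synced more often than a quiet one. A minimal sketch of that property, using only the Add/Get/Done/Len/ShutDown methods this file and its tests rely on (the exact Len semantics are an assumption of the sketch, not something this commit documents):

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/workqueue"
)

func main() {
	q := workqueue.New()
	q.Add("default/busy-service")  // enqueued once
	q.Add("default/busy-service")  // duplicate while still queued; coalesced
	q.Add("default/quiet-service")

	fmt.Println(q.Len()) // expected: 2 (one entry per distinct key)

	key, quit := q.Get()
	if !quit {
		fmt.Println("processing", key)
		q.Done(key) // mark done, exactly as the worker loop does
	}
	q.ShutDown()
}
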
// Run runs e; it will not return until stopCh is closed. workers determines
// how many endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer util.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()
	<-stopCh
	e.queue.ShutDown()
}

func (e *EndpointController) getPodServiceMemberships(pod *api.Pod) (util.StringSet, error) {
	set := util.StringSet{}
	services, err := e.serviceStore.GetPodServices(pod)
	if err != nil {
		// Don't log this error; GetPodServices returns an error
		// whenever no services match the pod, which is expected and
		// harmless here.
		return set, nil
	}
	for i := range services {
		key, err := keyFunc(&services[i])
		if err != nil {
			return nil, err
		}
		set.Insert(key)
	}
	return set, nil
}

// When a pod is added, figure out what services it will be a member of and
// enqueue them. obj must have *api.Pod type.
func (e *EndpointController) addPod(obj interface{}) {
	pod := obj.(*api.Pod)
	services, err := e.getPodServiceMemberships(pod)
	if err != nil {
		glog.Errorf("Unable to get pod %v/%v's service memberships: %v", pod.Namespace, pod.Name, err)
		return
	}
	for key := range services {
		e.queue.Add(key)
	}
}

// When a pod is updated, figure out what services it used to be a member of
// and what services it will be a member of, and enqueue the union of these.
// old and cur must be *api.Pod types.
func (e *EndpointController) updatePod(old, cur interface{}) {
	if api.Semantic.DeepEqual(old, cur) {
		return
	}
	newPod := cur.(*api.Pod)
	services, err := e.getPodServiceMemberships(newPod)
	if err != nil {
		glog.Errorf("Unable to get pod %v/%v's service memberships: %v", newPod.Namespace, newPod.Name, err)
		return
	}

	oldPod := old.(*api.Pod)
	// Only need to get the old services if the labels changed.
	if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) {
		oldServices, err := e.getPodServiceMemberships(oldPod)
		if err != nil {
			glog.Errorf("Unable to get pod %v/%v's service memberships: %v", oldPod.Namespace, oldPod.Name, err)
			return
		}
		services = services.Union(oldServices)
	}
	for key := range services {
		e.queue.Add(key)
	}
}

// When a pod is deleted, enqueue the services the pod used to be a member of.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
func (e *EndpointController) deletePod(obj interface{}) {
	if _, ok := obj.(*api.Pod); ok {
		// Enqueue all the services that the pod used to be a member
		// of. This happens to be exactly the same thing we do when a
		// pod is added.
		e.addPod(obj)
		return
	}
	podKey, err := keyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
		return
	}
	glog.Infof("Pod %q was deleted but we don't have a record of its final state, so it will take up to %v before it will be removed from all endpoint records.", podKey, FullServiceResyncPeriod)

	// TODO: keep a map of pods to services to handle this condition.
}

// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item.
func (e *EndpointController) enqueueService(obj interface{}) {
	key, err := keyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
		return
	}

	e.queue.Add(key)
}

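enqueueService, addPod, and syncService communicate only through plain "namespace/name" key strings. A small sketch of that round trip, using the same key function the controller aliases as keyFunc above (the key format follows the convention visible in the tests, e.g. ns + "/foo"; treat it as an assumption of this sketch):

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
)

func main() {
	svc := &api.Service{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "db"}}

	// Same key function the controller stores in keyFunc.
	key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(svc)
	if err != nil {
		panic(err)
	}
	fmt.Println(key) // expected: "default/db"

	// syncService inverts the mapping before talking to the API server.
	namespace, name, _ := cache.SplitMetaNamespaceKey(key)
	fmt.Println(namespace, name) // expected: default db
}
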
// worker runs a worker thread that just dequeues items, processes them, and
// marks them done. You may run as many of these in parallel as you wish; the
// workqueue guarantees that they will not end up processing the same service
// at the same time.
func (e *EndpointController) worker() {
	for {
		key, quit := e.queue.Get()
		if quit {
			return
		}
		func() {
			// Use defer: in the unlikely event that there's a
			// panic, we'd still like this to get marked done--
			// otherwise the controller will not be able to sync
			// this service again until it is restarted.
			defer e.queue.Done(key)
			e.syncService(key.(string))
		}()
	}
}

func (e *EndpointController) syncService(key string) {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
	}()
	obj, exists, err := e.serviceStore.Store.GetByKey(key)
	if err != nil || !exists {
		// Delete the corresponding endpoint, as the service has been deleted.
		// TODO: Please note that this will delete an endpoint when a
		// service is deleted. However, if we're down at the time when
		// the service is deleted, we will miss that deletion, so this
		// doesn't completely solve the problem. See #6877.
		namespace, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			glog.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err)
			// Don't retry, as the key isn't going to magically become understandable.
			return
		}
		err = e.client.Endpoints(namespace).Delete(name)
		if err != nil && !errors.IsNotFound(err) {
			glog.Errorf("Error deleting endpoint %q: %v", key, err)
			e.queue.Add(key) // Retry
		}
		return
	}

	service := obj.(*api.Service)
	if service.Spec.Selector == nil {
		// Services without a selector receive no endpoints from this controller;
		// these services will receive the endpoints that are created out-of-band via the REST API.
		return
	}

	glog.V(5).Infof("About to update endpoints for service %q", key)
	pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())
	if err != nil {
		// Since we're getting stuff from a local cache, it is
		// basically impossible to get this error.
		glog.Errorf("Error syncing service %q: %v", key, err)
		e.queue.Add(key) // Retry
		return
	}

	subsets := []api.EndpointSubset{}
	for i := range pods.Items {
		pod := &pods.Items[i]

		for i := range service.Spec.Ports {
			servicePort := &service.Spec.Ports[i]

			portName := servicePort.Name
			portProto := servicePort.Protocol
			portNum, err := findPort(pod, servicePort)
			if err != nil {
				glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
				continue
			}
			if len(pod.Status.PodIP) == 0 {
				glog.V(4).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
				continue
			}

			if !api.IsPodReady(pod) {
				glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
				continue
			}

			epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto}
			epa := api.EndpointAddress{IP: pod.Status.PodIP, TargetRef: &api.ObjectReference{
				Kind:            "Pod",
				Namespace:       pod.ObjectMeta.Namespace,
				Name:            pod.ObjectMeta.Name,
				UID:             pod.ObjectMeta.UID,
				ResourceVersion: pod.ObjectMeta.ResourceVersion,
			}}
			subsets = append(subsets, api.EndpointSubset{Addresses: []api.EndpointAddress{epa}, Ports: []api.EndpointPort{epp}})
		}
	}
	subsets = endpoints.RepackSubsets(subsets)

	// See if there's actually an update here.
	currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			currentEndpoints = &api.Endpoints{
				ObjectMeta: api.ObjectMeta{
					Name:   service.Name,
					Labels: service.Labels,
				},
			}
		} else {
			glog.Errorf("Error getting endpoints: %v", err)
			e.queue.Add(key) // Retry
			return
		}
	}
	if reflect.DeepEqual(currentEndpoints.Subsets, subsets) && reflect.DeepEqual(currentEndpoints.Labels, service.Labels) {
		glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
		return
	}
	newEndpoints := currentEndpoints
	newEndpoints.Subsets = subsets
	newEndpoints.Labels = service.Labels

	if len(currentEndpoints.ResourceVersion) == 0 {
		// No previous endpoints, create them.
		_, err = e.client.Endpoints(service.Namespace).Create(newEndpoints)
	} else {
		// Pre-existing endpoints, update them.
		_, err = e.client.Endpoints(service.Namespace).Update(newEndpoints)
	}
	if err != nil {
		glog.Errorf("Error updating endpoints: %v", err)
		e.queue.Add(key) // Retry
	}
}

// checkLeftoverEndpoints lists all currently existing endpoints and adds their
// service to the queue. This will detect endpoints that exist with no
// corresponding service; these endpoints need to be deleted. We only need to
// do this once on startup, because in steady-state these are detected (but
// some stragglers could have been left behind if the endpoint controller
// reboots).
func (e *EndpointController) checkLeftoverEndpoints() {
	list, err := e.client.Endpoints(api.NamespaceAll).List(labels.Everything())
	if err != nil {
		glog.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err)
		return
	}
	for i := range list.Items {
		ep := &list.Items[i]
		key, err := keyFunc(ep)
		if err != nil {
			glog.Errorf("Unable to get key for endpoint %#v", ep)
			continue
		}
		e.queue.Add(key)
	}
}

// findPort locates the container port for the given pod and portName. If the
// targetPort is a number, use that. If the targetPort is a string, look that
// string up in all named ports in all containers in the target pod. If no
// match is found, fail.
func findPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) {
	portName := svcPort.TargetPort
	switch portName.Kind {
	case util.IntstrString:
		name := portName.StrVal
		for _, container := range pod.Spec.Containers {
			for _, port := range container.Ports {
				if port.Name == name && port.Protocol == svcPort.Protocol {
					return port.ContainerPort, nil
				}
			}
		}
	case util.IntstrInt:
		return portName.IntVal, nil
	}

	return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
}
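
For orientation, this is roughly how a caller would wire the controller up — a hypothetical sketch, not code from this commit; the host address and worker count are placeholders:

package main

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"

	endpointcontroller "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/endpoint"
)

func main() {
	// Placeholder API server address.
	c := client.NewOrDie(&client.Config{Host: "http://localhost:8080"})

	stopCh := make(chan struct{})
	e := endpointcontroller.NewEndpointController(c)

	// Run blocks until stopCh is closed; five parallel sync workers.
	go e.Run(5, stopCh)

	// ... later, on shutdown:
	close(stopCh)
}
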
529
pkg/controller/endpoint/endpoints_controller_test.go
Normal file
@@ -0,0 +1,529 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpointcontroller

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	endptspkg "github.com/GoogleCloudPlatform/kubernetes/pkg/api/endpoints"
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

func addPods(store cache.Store, namespace string, nPods int, nPorts int) {
	for i := 0; i < nPods; i++ {
		p := &api.Pod{
			TypeMeta: api.TypeMeta{APIVersion: testapi.Version()},
			ObjectMeta: api.ObjectMeta{
				Namespace: namespace,
				Name:      fmt.Sprintf("pod%d", i),
				Labels:    map[string]string{"foo": "bar"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{Ports: []api.ContainerPort{}}},
			},
			Status: api.PodStatus{
				PodIP: fmt.Sprintf("1.2.3.%d", 4+i),
				Conditions: []api.PodCondition{
					{
						Type:   api.PodReady,
						Status: api.ConditionTrue,
					},
				},
			},
		}
		for j := 0; j < nPorts; j++ {
			// Name each port by its own index so port names are
			// unique within the container.
			p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports,
				api.ContainerPort{Name: fmt.Sprintf("port%d", j), ContainerPort: 8080 + j})
		}
		store.Add(p)
	}
}

func TestFindPort(t *testing.T) {
	testCases := []struct {
		name       string
		containers []api.Container
		port       util.IntOrString
		expected   int
		pass       bool
	}{{
		name:       "valid int, no ports",
		containers: []api.Container{{}},
		port:       util.NewIntOrStringFromInt(93),
		expected:   93,
		pass:       true,
	}, {
		name: "valid int, with ports",
		containers: []api.Container{{Ports: []api.ContainerPort{{
			Name:          "",
			ContainerPort: 11,
			Protocol:      "TCP",
		}, {
			Name:          "p",
			ContainerPort: 22,
			Protocol:      "TCP",
		}}}},
		port:     util.NewIntOrStringFromInt(93),
		expected: 93,
		pass:     true,
	}, {
		name:       "valid str, no ports",
		containers: []api.Container{{}},
		port:       util.NewIntOrStringFromString("p"),
		expected:   0,
		pass:       false,
	}, {
		name: "valid str, one ctr with ports",
		containers: []api.Container{{Ports: []api.ContainerPort{{
			Name:          "",
			ContainerPort: 11,
			Protocol:      "UDP",
		}, {
			Name:          "p",
			ContainerPort: 22,
			Protocol:      "TCP",
		}, {
			Name:          "q",
			ContainerPort: 33,
			Protocol:      "TCP",
		}}}},
		port:     util.NewIntOrStringFromString("q"),
		expected: 33,
		pass:     true,
	}, {
		name: "valid str, two ctr with ports",
		containers: []api.Container{{}, {Ports: []api.ContainerPort{{
			Name:          "",
			ContainerPort: 11,
			Protocol:      "UDP",
		}, {
			Name:          "p",
			ContainerPort: 22,
			Protocol:      "TCP",
		}, {
			Name:          "q",
			ContainerPort: 33,
			Protocol:      "TCP",
		}}}},
		port:     util.NewIntOrStringFromString("q"),
		expected: 33,
		pass:     true,
	}}

	for _, tc := range testCases {
		port, err := findPort(&api.Pod{Spec: api.PodSpec{Containers: tc.containers}},
			&api.ServicePort{Protocol: "TCP", TargetPort: tc.port})
		if err != nil && tc.pass {
			t.Errorf("unexpected error for %s: %v", tc.name, err)
		}
		if err == nil && !tc.pass {
			t.Errorf("unexpected non-error for %s: %d", tc.name, port)
		}
		if port != tc.expected {
			t.Errorf("wrong result for %s: expected %d, got %d", tc.name, tc.expected, port)
		}
	}
}

type serverResponse struct {
	statusCode int
	obj        interface{}
}

func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResponse) (*httptest.Server, *util.FakeHandler) {
	fakeEndpointsHandler := util.FakeHandler{
		StatusCode:   endpointsResponse.statusCode,
		ResponseBody: runtime.EncodeOrDie(testapi.Codec(), endpointsResponse.obj.(runtime.Object)),
	}
	mux := http.NewServeMux()
	mux.Handle(testapi.ResourcePath("endpoints", namespace, ""), &fakeEndpointsHandler)
	mux.Handle(testapi.ResourcePath("endpoints/", namespace, ""), &fakeEndpointsHandler)
	mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) {
		t.Errorf("unexpected request: %v", req.RequestURI)
		res.WriteHeader(http.StatusNotFound)
	})
	return httptest.NewServer(mux), &fakeEndpointsHandler
}

func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 0)
}

func TestCheckLeftoverEndpoints(t *testing.T) {
	ns := api.NamespaceDefault
	// Note that this requests *all* endpoints, therefore the NamespaceAll
	// below.
	testServer, _ := makeTestServer(t, api.NamespaceAll,
		serverResponse{http.StatusOK, &api.EndpointsList{
			ListMeta: api.ListMeta{
				ResourceVersion: "1",
			},
			Items: []api.Endpoints{{
				ObjectMeta: api.ObjectMeta{
					Name:            "foo",
					Namespace:       ns,
					ResourceVersion: "1",
				},
				Subsets: []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
					Ports:     []api.EndpointPort{{Port: 1000}},
				}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	endpoints.checkLeftoverEndpoints()

	if e, a := 1, endpoints.queue.Len(); e != a {
		t.Fatalf("Expected %v, got %v", e, a)
	}
	got, _ := endpoints.queue.Get()
	if e, a := ns+"/foo", got; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}

func TestSyncEndpointsProtocolTCP(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000, Protocol: "TCP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 0)
}

func TestSyncEndpointsProtocolUDP(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000, Protocol: "UDP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 0)
}

func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, ns, 1, 1)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
}

func TestSyncEndpointsItemsPreexisting(t *testing.T) {
	ns := "bar"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, ns, 1, 1)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
}

func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
				Name:            "foo",
				Namespace:       ns,
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
				Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1)
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil)
}

func TestSyncEndpointsItems(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, ns, 3, 2)
	addPods(endpoints.podStore.Store, "blah", 5, 2) // make sure these aren't found!
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports: []api.ServicePort{
				{Name: "port0", Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)},
				{Name: "port1", Port: 88, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8088)},
			},
		},
	})
	endpoints.syncService("other/foo")
	expectedSubsets := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{
			{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
			{IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
			{IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
		},
		Ports: []api.EndpointPort{
			{Name: "port0", Port: 8080, Protocol: "TCP"},
			{Name: "port1", Port: 8088, Protocol: "TCP"},
		},
	}}
	data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "",
		},
		Subsets: endptspkg.SortSubsets(expectedSubsets),
	})
	// endpointsHandler should get 2 requests - one for "GET" and the next for "POST".
	endpointsHandler.ValidateRequestCount(t, 2)
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, ""), "POST", &data)
}

func TestSyncEndpointsItemsWithLabels(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, ns, 3, 2)
	serviceLabels := map[string]string{"foo": "bar"}
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: ns,
			Labels:    serviceLabels,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports: []api.ServicePort{
				{Name: "port0", Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)},
				{Name: "port1", Port: 88, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8088)},
			},
		},
	})
	endpoints.syncService(ns + "/foo")
	expectedSubsets := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{
			{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
			{IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
			{IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
		},
		Ports: []api.EndpointPort{
			{Name: "port0", Port: 8080, Protocol: "TCP"},
			{Name: "port1", Port: 8088, Protocol: "TCP"},
		},
	}}
	data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "",
			Labels:          serviceLabels,
		},
		Subsets: endptspkg.SortSubsets(expectedSubsets),
	})
	// endpointsHandler should get 2 requests - one for "GET" and the next for "POST".
	endpointsHandler.ValidateRequestCount(t, 2)
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, ""), "POST", &data)
}

func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
	ns := "bar"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
				Labels: map[string]string{
					"foo": "bar",
				},
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			}},
		}})
	defer testServer.Close()
	client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
	endpoints := NewEndpointController(client)
	addPods(endpoints.podStore.Store, ns, 1, 1)
	serviceLabels := map[string]string{"baz": "blah"}
	endpoints.serviceStore.Store.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: ns,
			Labels:    serviceLabels,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
			Labels:          serviceLabels,
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
}
18
pkg/controller/namespace/doc.go
Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package namespacecontroller contains a controller that handles namespace lifecycle.
package namespacecontroller
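
A caller would drive this controller roughly as follows — a hypothetical sketch, not code from this commit; the host address and resync period are placeholders:

package main

import (
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"

	namespacecontroller "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/namespace"
)

func main() {
	// Placeholder API server address; any client.Interface implementation works.
	c := client.NewOrDie(&client.Config{Host: "http://localhost:8080"})

	// Re-list namespaces every five minutes (illustrative period).
	nm := namespacecontroller.NewNamespaceController(c, 5*time.Minute)
	nm.Run() // starts its informer goroutine and returns immediately

	// ... later, on shutdown:
	nm.Stop()
}
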
334
pkg/controller/namespace/namespace_controller.go
Normal file
@@ -0,0 +1,334 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package namespacecontroller

import (
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"

	"github.com/golang/glog"
)

// NamespaceController is responsible for performing actions dependent upon a namespace phase
type NamespaceController struct {
	controller     *framework.Controller
	StopEverything chan struct{}
}

// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceController {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				err := syncNamespace(kubeClient, *namespace)
				if err != nil {
					glog.Error(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				err := syncNamespace(kubeClient, *namespace)
				if err != nil {
					glog.Error(err)
				}
			},
		},
	)

	return &NamespaceController{
		controller: controller,
	}
}

// Run begins observing the system. It starts a goroutine and returns immediately.
func (nm *NamespaceController) Run() {
	if nm.StopEverything == nil {
		nm.StopEverything = make(chan struct{})
		go nm.controller.Run(nm.StopEverything)
	}
}

// Stop gracefully shuts down this controller
func (nm *NamespaceController) Stop() {
	if nm.StopEverything != nil {
		close(nm.StopEverything)
		nm.StopEverything = nil
	}
}

// finalized returns true if spec.finalizers is an empty list
func finalized(namespace api.Namespace) bool {
	return len(namespace.Spec.Finalizers) == 0
}

// finalize will finalize the namespace for kubernetes
func finalize(kubeClient client.Interface, namespace api.Namespace) (*api.Namespace, error) {
	namespaceFinalize := api.Namespace{}
	namespaceFinalize.ObjectMeta = namespace.ObjectMeta
	namespaceFinalize.Spec = namespace.Spec
	finalizerSet := util.NewStringSet()
	for i := range namespace.Spec.Finalizers {
		if namespace.Spec.Finalizers[i] != api.FinalizerKubernetes {
			finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
		}
	}
	namespaceFinalize.Spec.Finalizers = make([]api.FinalizerName, 0, len(finalizerSet))
	for _, value := range finalizerSet.List() {
		namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))
	}
	return kubeClient.Namespaces().Finalize(&namespaceFinalize)
}

// deleteAllContent will delete all content known to the system in a namespace
func deleteAllContent(kubeClient client.Interface, namespace string) (err error) {
	err = deleteServiceAccounts(kubeClient, namespace)
	if err != nil {
		return err
	}
	err = deleteServices(kubeClient, namespace)
	if err != nil {
		return err
	}
	err = deleteReplicationControllers(kubeClient, namespace)
	if err != nil {
		return err
	}
	err = deletePods(kubeClient, namespace)
	if err != nil {
		return err
	}
	err = deleteSecrets(kubeClient, namespace)
	if err != nil {
		return err
	}
	err = deletePersistentVolumeClaims(kubeClient, namespace)
	if err != nil {
		return err
	}
	err = deleteLimitRanges(kubeClient, namespace)
	if err != nil {
		return err
	}
	err = deleteResourceQuotas(kubeClient, namespace)
	if err != nil {
		return err
	}
	err = deleteEvents(kubeClient, namespace)
	if err != nil {
		return err
	}
	return nil
}

// syncNamespace makes namespace life-cycle decisions
func syncNamespace(kubeClient client.Interface, namespace api.Namespace) (err error) {
	if namespace.DeletionTimestamp == nil {
		return nil
	}

	// if there is a deletion timestamp, and the status is not terminating, then update status
	if !namespace.DeletionTimestamp.IsZero() && namespace.Status.Phase != api.NamespaceTerminating {
		newNamespace := api.Namespace{}
		newNamespace.ObjectMeta = namespace.ObjectMeta
		newNamespace.Status = namespace.Status
		newNamespace.Status.Phase = api.NamespaceTerminating
		result, err := kubeClient.Namespaces().Status(&newNamespace)
		if err != nil {
			return err
		}
		// work with the latest copy so we can proceed to clean up right away without another interval
		namespace = *result
	}

	// if the namespace is already finalized, delete it
	if finalized(namespace) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
		return nil
	}

	// there may still be content for us to remove
	err = deleteAllContent(kubeClient, namespace.Name)
	if err != nil {
		return err
	}

	// we have removed content, so mark it finalized by us
	result, err := finalize(kubeClient, namespace)
	if err != nil {
		return err
	}

	// if all of the finalizers have now been removed, the namespace can be deleted
	if finalized(*result) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}

	return nil
}

func deleteLimitRanges(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.LimitRanges(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.LimitRanges(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func deleteResourceQuotas(kubeClient client.Interface, ns string) error {
	resourceQuotas, err := kubeClient.ResourceQuotas(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range resourceQuotas.Items {
		err := kubeClient.ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func deleteServiceAccounts(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.ServiceAccounts(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.ServiceAccounts(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func deleteServices(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.Services(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.Services(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func deleteReplicationControllers(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.ReplicationControllers(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func deletePods(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, nil)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func deleteEvents(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.Events(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.Events(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func deleteSecrets(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.Secrets(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.Secrets(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}

func deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.PersistentVolumeClaims(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
158
pkg/controller/namespace/namespace_controller_test.go
Normal file
@@ -0,0 +1,158 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package namespacecontroller

import (
	"testing"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

func TestFinalized(t *testing.T) {
	testNamespace := api.Namespace{
		Spec: api.NamespaceSpec{
			Finalizers: []api.FinalizerName{"a", "b"},
		},
	}
	if finalized(testNamespace) {
		t.Errorf("Unexpected result, namespace is not finalized")
	}
	testNamespace.Spec.Finalizers = []api.FinalizerName{}
	if !finalized(testNamespace) {
		t.Errorf("Expected object to be finalized")
	}
}

func TestFinalize(t *testing.T) {
	mockClient := &testclient.Fake{}
	testNamespace := api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:            "test",
			ResourceVersion: "1",
		},
		Spec: api.NamespaceSpec{
			Finalizers: []api.FinalizerName{"kubernetes", "other"},
		},
	}
	finalize(mockClient, testNamespace)
	actions := mockClient.Actions()
	if len(actions) != 1 {
		t.Errorf("Expected 1 mock client action, but got %v", len(actions))
	}
	if actions[0].Action != "finalize-namespace" {
		t.Errorf("Expected finalize-namespace action %v", actions[0].Action)
	}
	finalizers := actions[0].Value.(*api.Namespace).Spec.Finalizers
	if len(finalizers) != 1 {
		t.Errorf("There should be a single finalizer remaining")
	}
	if "other" != string(finalizers[0]) {
		t.Errorf("Unexpected finalizer value, %v", finalizers[0])
	}
}

func TestSyncNamespaceThatIsTerminating(t *testing.T) {
	mockClient := &testclient.Fake{}
	now := util.Now()
	testNamespace := api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:              "test",
			ResourceVersion:   "1",
			DeletionTimestamp: &now,
		},
		Spec: api.NamespaceSpec{
			Finalizers: []api.FinalizerName{"kubernetes"},
		},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceTerminating,
		},
	}
	err := syncNamespace(mockClient, testNamespace)
	if err != nil {
		t.Errorf("Unexpected error when syncing namespace %v", err)
	}
	// TODO: Reuse the constants for all these strings from testclient
	expectedActionSet := util.NewStringSet(
		testclient.ListControllerAction,
		"list-services",
		"list-pods",
		"list-resourceQuotas",
		"list-secrets",
		"list-limitRanges",
		"list-events",
		"finalize-namespace",
		"delete-namespace")
	actionSet := util.NewStringSet()
	for _, action := range mockClient.Actions() {
		actionSet.Insert(action.Action)
	}
	if !actionSet.HasAll(expectedActionSet.List()...) {
		t.Errorf("Expected actions: %v, but got: %v", expectedActionSet, actionSet)
	}
}

func TestSyncNamespaceThatIsActive(t *testing.T) {
	mockClient := &testclient.Fake{}
	testNamespace := api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:            "test",
			ResourceVersion: "1",
		},
		Spec: api.NamespaceSpec{
			Finalizers: []api.FinalizerName{"kubernetes"},
		},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceActive,
		},
	}
	err := syncNamespace(mockClient, testNamespace)
	if err != nil {
		t.Errorf("Unexpected error when syncing namespace %v", err)
	}
	actionSet := util.NewStringSet()
	for _, action := range mockClient.Actions() {
		actionSet.Insert(action.Action)
	}
	if len(actionSet) != 0 {
		t.Errorf("Expected no action from controller, but got: %v", actionSet)
	}
}

func TestRunStop(t *testing.T) {
	o := testclient.NewObjects(api.Scheme, api.Scheme)
	client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, api.RESTMapper)}
	nsController := NewNamespaceController(client, 1*time.Second)

	if nsController.StopEverything != nil {
		t.Errorf("Non-running manager should not have a stop channel. Got %v", nsController.StopEverything)
	}

	nsController.Run()

	if nsController.StopEverything == nil {
		t.Errorf("Running manager should have a stop channel. Got nil")
	}

	nsController.Stop()

	if nsController.StopEverything != nil {
		t.Errorf("Non-running manager should not have a stop channel. Got %v", nsController.StopEverything)
	}
}
19
pkg/controller/node/doc.go
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package nodecontroller contains code for syncing cloud instances with
// the minion registry.
package nodecontroller
476
pkg/controller/node/nodecontroller.go
Normal file
@@ -0,0 +1,476 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodecontroller

import (
    "errors"
    "fmt"
    "net"
    "time"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
    "github.com/golang/glog"
)

var (
    ErrRegistration   = errors.New("unable to register all nodes.")
    ErrQueryIPAddress = errors.New("unable to query IP address.")
    ErrCloudInstance  = errors.New("cloud provider doesn't support instances.")
)

const (
    // nodeStatusUpdateRetry controls the number of retries of writing a NodeStatus update.
    nodeStatusUpdateRetry = 5
    // nodeEvictionPeriod controls how often NodeController will try to evict Pods
    // from non-responsive Nodes.
    nodeEvictionPeriod = 100 * time.Millisecond
)

type nodeStatusData struct {
    probeTimestamp           util.Time
    readyTransitionTimestamp util.Time
    status                   api.NodeStatus
}

type NodeController struct {
    cloud                   cloudprovider.Interface
    kubeClient              client.Interface
    recorder                record.EventRecorder
    registerRetryCount      int
    podEvictionTimeout      time.Duration
    deletingPodsRateLimiter util.RateLimiter
    // worker that evicts pods from unresponsive nodes.
    podEvictor *PodEvictor

    // Per-Node map storing the last observed Status together with a local time when it was observed.
    // This timestamp is to be used instead of the LastProbeTime stored in Condition. We do this
    // to avoid the problem with time skew across the cluster.
    nodeStatusMap map[string]nodeStatusData
    // Value used if sync_nodes_status=False. NodeController will not proactively
    // sync node status in this case, but will monitor node status updated from kubelet. If
    // it doesn't receive an update for this amount of time, it will start posting "NodeReady==
    // ConditionUnknown". The amount of time before which NodeController starts evicting pods
    // is controlled via the flag 'pod_eviction_timeout'.
    // Note: be cautious when changing the constant, it must work with nodeStatusUpdateFrequency
    // in kubelet. There are several constraints:
    // 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
    //    N means the number of retries allowed for kubelet to post node status. It is pointless
    //    to make nodeMonitorGracePeriod less than nodeStatusUpdateFrequency, since there
    //    will only be fresh values from kubelet at an interval of nodeStatusUpdateFrequency.
    //    The constant must be less than podEvictionTimeout.
    // 2. nodeMonitorGracePeriod can't be too large for user experience - a larger value takes
    //    longer for the user to see up-to-date node status.
    nodeMonitorGracePeriod time.Duration
    // Value used if sync_nodes_status=False, only for node startup. When a node
    // is just created, e.g. at cluster bootstrap or node creation, we give it a longer grace period.
    nodeStartupGracePeriod time.Duration
    // Value controlling the NodeController monitoring period, i.e. how often NodeController
    // checks node status posted from kubelet. This value should be lower than nodeMonitorGracePeriod.
    // TODO: Change node status monitor to watch based.
    nodeMonitorPeriod time.Duration
    clusterCIDR       *net.IPNet
    allocateNodeCIDRs bool
    // Methods for easy mocking in unittests.
    lookupIP func(host string) ([]net.IP, error)
    now      func() util.Time
}
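// Editor's note - a minimal sketch (not part of this file) of the timing
// constraint described above: nodeMonitorGracePeriod must cover N kubelet
// status-posting retries and stay below podEvictionTimeout. The frequency and
// retry count below are assumed values, for illustration only:
//
//	nodeStatusUpdateFrequency := 10 * time.Second // assumed kubelet setting
//	retries := 4                                  // N retries allowed for kubelet
//	podEvictionTimeout := 5 * time.Minute
//	minGrace := time.Duration(retries) * nodeStatusUpdateFrequency // 40s
//	// valid iff minGrace <= nodeMonitorGracePeriod < podEvictionTimeout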

// NewNodeController returns a new node controller to sync instances from the cloudprovider.
func NewNodeController(
    cloud cloudprovider.Interface,
    kubeClient client.Interface,
    registerRetryCount int,
    podEvictionTimeout time.Duration,
    podEvictor *PodEvictor,
    nodeMonitorGracePeriod time.Duration,
    nodeStartupGracePeriod time.Duration,
    nodeMonitorPeriod time.Duration,
    clusterCIDR *net.IPNet,
    allocateNodeCIDRs bool) *NodeController {
    eventBroadcaster := record.NewBroadcaster()
    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
    eventBroadcaster.StartLogging(glog.Infof)
    if kubeClient != nil {
        glog.Infof("Sending events to api server.")
        eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
    } else {
        glog.Infof("No api server defined - no events will be sent to API server.")
    }
    if allocateNodeCIDRs && clusterCIDR == nil {
        glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
    }
    return &NodeController{
        cloud:                   cloud,
        kubeClient:              kubeClient,
        recorder:                recorder,
        registerRetryCount:      registerRetryCount,
        podEvictionTimeout:      podEvictionTimeout,
        podEvictor:              podEvictor,
        nodeStatusMap:           make(map[string]nodeStatusData),
        nodeMonitorGracePeriod:  nodeMonitorGracePeriod,
        nodeMonitorPeriod:       nodeMonitorPeriod,
        nodeStartupGracePeriod:  nodeStartupGracePeriod,
        lookupIP:                net.LookupIP,
        now:                     util.Now,
        clusterCIDR:             clusterCIDR,
        allocateNodeCIDRs:       allocateNodeCIDRs,
    }
}

// generateCIDRs generates num pod CIDRs that could be assigned to nodes.
func generateCIDRs(clusterCIDR *net.IPNet, num int) util.StringSet {
    res := util.NewStringSet()
    cidrIP := clusterCIDR.IP.To4()
    for i := 0; i < num; i++ {
        // TODO: Make the CIDRs configurable.
        b1 := byte(i >> 8)
        b2 := byte(i % 256)
        res.Insert(fmt.Sprintf("%d.%d.%d.0/24", cidrIP[0], cidrIP[1]+b1, cidrIP[2]+b2))
    }
    return res
}
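// Editor's note - an illustrative sketch (not part of this file) of how
// generateCIDRs maps an index to a /24 subnet: the low byte of i selects the
// third octet and overflow rolls into the second. Assuming a hypothetical
// cluster CIDR of 10.244.0.0/16, generateCIDRs(clusterCIDR, 257) yields:
//
//	i=0   -> 10.244.0.0/24
//	i=1   -> 10.244.1.0/24
//	i=255 -> 10.244.255.0/24
//	i=256 -> 10.245.0.0/24  // b1 = 1, b2 = 0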

// reconcileNodeCIDRs looks at each node and assigns it a valid CIDR
// if it doesn't currently have one.
func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
    glog.V(4).Infof("Reconciling cidrs for %d nodes", len(nodes.Items))
    // TODO(roberthbailey): This seems inefficient. Why re-calculate CIDRs
    // on each sync period?
    availableCIDRs := generateCIDRs(nc.clusterCIDR, len(nodes.Items))
    for _, node := range nodes.Items {
        if node.Spec.PodCIDR != "" {
            glog.V(4).Infof("CIDR %s is already being used by node %s", node.Spec.PodCIDR, node.Name)
            availableCIDRs.Delete(node.Spec.PodCIDR)
        }
    }
    for _, node := range nodes.Items {
        if node.Spec.PodCIDR == "" {
            podCIDR, found := availableCIDRs.PopAny()
            if !found {
                nc.recordNodeStatusChange(&node, "No available CIDR")
                continue
            }
            glog.V(4).Infof("Assigning node %s CIDR %s", node.Name, podCIDR)
            node.Spec.PodCIDR = podCIDR
            if _, err := nc.kubeClient.Nodes().Update(&node); err != nil {
                nc.recordNodeStatusChange(&node, "CIDR assignment failed")
            }
        }
    }
}

// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run(period time.Duration) {
    // Incorporate the results of node status pushed from kubelet to master.
    go util.Forever(func() {
        if err := nc.monitorNodeStatus(); err != nil {
            glog.Errorf("Error monitoring node status: %v", err)
        }
    }, nc.nodeMonitorPeriod)

    go util.Forever(func() {
        nc.podEvictor.TryEvict(func(nodeName string) { nc.deletePods(nodeName) })
    }, nodeEvictionPeriod)
}

func (nc *NodeController) recordNodeStatusChange(node *api.Node, newStatus string) {
    ref := &api.ObjectReference{
        Kind:      "Node",
        Name:      node.Name,
        UID:       types.UID(node.Name),
        Namespace: "",
    }
    glog.V(2).Infof("Recording status change %s event message for node %s", newStatus, node.Name)
    // TODO: This requires a transaction: either both the node status update and the
    // event recording happen, or neither should. See issue #6055.
    nc.recorder.Eventf(ref, newStatus, "Node %s status is now: %s", node.Name, newStatus)
}

func (nc *NodeController) recordNodeEvent(nodeName string, event string) {
    ref := &api.ObjectReference{
        Kind:      "Node",
        Name:      nodeName,
        UID:       types.UID(nodeName),
        Namespace: "",
    }
    glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
    nc.recorder.Eventf(ref, event, "Node %s event: %s", nodeName, event)
}

// tryUpdateNodeStatus checks the conditions of the given node and tries to update it.
// It returns the grace period to which the node is entitled, the current and last
// observed Ready Condition, and an error if one occurred.
func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, api.NodeCondition, *api.NodeCondition, error) {
    var err error
    var gracePeriod time.Duration
    var lastReadyCondition api.NodeCondition
    readyCondition := nc.getCondition(&node.Status, api.NodeReady)
    if readyCondition == nil {
        // If the ready condition is nil, then kubelet (or nodecontroller) never posted node status.
        // A fake ready condition is created, where LastProbeTime and LastTransitionTime are set
        // to node.CreationTimestamp, to avoid handling the corner case.
        lastReadyCondition = api.NodeCondition{
            Type:               api.NodeReady,
            Status:             api.ConditionUnknown,
            LastHeartbeatTime:  node.CreationTimestamp,
            LastTransitionTime: node.CreationTimestamp,
        }
        gracePeriod = nc.nodeStartupGracePeriod
        nc.nodeStatusMap[node.Name] = nodeStatusData{
            status:                   node.Status,
            probeTimestamp:           node.CreationTimestamp,
            readyTransitionTimestamp: node.CreationTimestamp,
        }
    } else {
        // If the ready condition is not nil, make a copy of it, since we may modify it in place later.
        lastReadyCondition = *readyCondition
        gracePeriod = nc.nodeMonitorGracePeriod
    }

    savedNodeStatus, found := nc.nodeStatusMap[node.Name]
    // There are the following cases to check:
    // - both the saved and new status have no Ready Condition set - we leave everything as it is,
    // - the saved status has no Ready Condition, but the current one does - NodeController was restarted with Node data already present in etcd,
    // - the saved status has a Ready Condition, but the current one does not - it's an error, but we fill it in because that's probably a good thing to do,
    // - both the saved and current statuses have Ready Conditions and they have the same LastProbeTime - nothing happened on that Node, it may be
    //   unresponsive, so we leave it as it is,
    // - both the saved and current statuses have Ready Conditions, they have different LastProbeTimes but the same Ready Condition State -
    //   everything's in order, no transition occurred, we update only probeTimestamp,
    // - both the saved and current statuses have Ready Conditions, different LastProbeTimes and different Ready Condition States -
    //   the Ready Condition changed its state since we last saw it, so we update both probeTimestamp and readyTransitionTimestamp.
    // TODO: things to consider:
    // - if 'LastProbeTime' has gone back in time it's probably an error, currently we ignore it,
    // - currently the only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check
    //   if that's the case, but it does not seem necessary.
    savedCondition := nc.getCondition(&savedNodeStatus.status, api.NodeReady)
    observedCondition := nc.getCondition(&node.Status, api.NodeReady)
    if !found {
        glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
        savedNodeStatus = nodeStatusData{
            status:                   node.Status,
            probeTimestamp:           nc.now(),
            readyTransitionTimestamp: nc.now(),
        }
        nc.nodeStatusMap[node.Name] = savedNodeStatus
    } else if savedCondition == nil && observedCondition != nil {
        glog.V(1).Infof("Creating timestamp entry for newly observed Node %s", node.Name)
        savedNodeStatus = nodeStatusData{
            status:                   node.Status,
            probeTimestamp:           nc.now(),
            readyTransitionTimestamp: nc.now(),
        }
        nc.nodeStatusMap[node.Name] = savedNodeStatus
    } else if savedCondition != nil && observedCondition == nil {
        glog.Errorf("ReadyCondition was removed from Status of Node %s", node.Name)
        // TODO: figure out what to do in this case. For now we do the same thing as above.
        savedNodeStatus = nodeStatusData{
            status:                   node.Status,
            probeTimestamp:           nc.now(),
            readyTransitionTimestamp: nc.now(),
        }
        nc.nodeStatusMap[node.Name] = savedNodeStatus
    } else if savedCondition != nil && observedCondition != nil && savedCondition.LastHeartbeatTime != observedCondition.LastHeartbeatTime {
        var transitionTime util.Time
        // If the ReadyCondition changed since the last time we checked, we update the transition timestamp to "now",
        // otherwise we leave it as it is.
        if savedCondition.LastTransitionTime != observedCondition.LastTransitionTime {
            glog.V(3).Infof("ReadyCondition for Node %s transitioned from %v to %v", node.Name, savedCondition.Status, observedCondition.Status)

            transitionTime = nc.now()
        } else {
            transitionTime = savedNodeStatus.readyTransitionTimestamp
        }
        glog.V(3).Infof("Node's ReadyCondition updated. Updating timestamp: %+v\n vs %+v.", savedNodeStatus.status, node.Status)
        savedNodeStatus = nodeStatusData{
            status:                   node.Status,
            probeTimestamp:           nc.now(),
            readyTransitionTimestamp: transitionTime,
        }
        nc.nodeStatusMap[node.Name] = savedNodeStatus
    }

    if nc.now().After(savedNodeStatus.probeTimestamp.Add(gracePeriod)) {
        // The NodeReady condition was last set longer ago than gracePeriod, so update it to Unknown
        // (regardless of its current value) in the master, without contacting kubelet.
        if readyCondition == nil {
            glog.V(2).Infof("node %v is never updated by kubelet", node.Name)
            node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
                Type:               api.NodeReady,
                Status:             api.ConditionUnknown,
                Reason:             "Kubelet never posted node status.",
                LastHeartbeatTime:  node.CreationTimestamp,
                LastTransitionTime: nc.now(),
            })
        } else {
            glog.V(2).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v",
                node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), lastReadyCondition)
            if lastReadyCondition.Status != api.ConditionUnknown {
                readyCondition.Status = api.ConditionUnknown
                readyCondition.Reason = "Kubelet stopped posting node status."
                // LastProbeTime is the last time we heard from kubelet.
                readyCondition.LastHeartbeatTime = lastReadyCondition.LastHeartbeatTime
                readyCondition.LastTransitionTime = nc.now()
            }
        }
        if !api.Semantic.DeepEqual(nc.getCondition(&node.Status, api.NodeReady), &lastReadyCondition) {
            if _, err = nc.kubeClient.Nodes().UpdateStatus(node); err != nil {
                glog.Errorf("Error updating node %s: %v", node.Name, err)
                return gracePeriod, lastReadyCondition, readyCondition, err
            } else {
                nc.nodeStatusMap[node.Name] = nodeStatusData{
                    status:                   node.Status,
                    probeTimestamp:           nc.nodeStatusMap[node.Name].probeTimestamp,
                    readyTransitionTimestamp: nc.now(),
                }
                return gracePeriod, lastReadyCondition, readyCondition, nil
            }
        }
    }

    return gracePeriod, lastReadyCondition, readyCondition, err
}
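// Editor's note - a minimal sketch (not part of this file) of the staleness
// check at the heart of tryUpdateNodeStatus; the timestamps below are
// hypothetical values for illustration:
//
//	probeTimestamp := nc.now().Add(-50 * time.Second) // last saved heartbeat
//	gracePeriod := 40 * time.Second                   // nodeMonitorGracePeriod
//	if nc.now().After(probeTimestamp.Add(gracePeriod)) {
//		// kubelet is considered unresponsive: NodeReady -> ConditionUnknown
//	}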

// monitorNodeStatus verifies that node statuses are constantly updated by kubelet, and if not,
// posts "NodeReady==ConditionUnknown". It also evicts all pods if a node is not ready or
// not reachable for a long period of time.
func (nc *NodeController) monitorNodeStatus() error {
    nodes, err := nc.kubeClient.Nodes().List(labels.Everything(), fields.Everything())
    if err != nil {
        return err
    }
    if nc.allocateNodeCIDRs {
        // TODO (cjcullen): Use pkg/controller/framework to watch nodes and
        // reduce lists/decouple this from monitoring status.
        nc.reconcileNodeCIDRs(nodes)
    }
    for i := range nodes.Items {
        var gracePeriod time.Duration
        var lastReadyCondition api.NodeCondition
        var readyCondition *api.NodeCondition
        node := &nodes.Items[i]
        for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
            gracePeriod, lastReadyCondition, readyCondition, err = nc.tryUpdateNodeStatus(node)
            if err == nil {
                break
            }
            name := node.Name
            node, err = nc.kubeClient.Nodes().Get(name)
            if err != nil {
                glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
                break
            }
        }
        if err != nil {
            glog.Errorf("Update status of Node %v from NodeController exceeds retry count. "+
                "Skipping - no pods will be evicted.", node.Name)
            continue
        }

        decisionTimestamp := nc.now()

        if readyCondition != nil {
            // Check eviction timeout against decisionTimestamp.
            if lastReadyCondition.Status == api.ConditionFalse &&
                decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
                if nc.podEvictor.AddNodeToEvict(node.Name) {
                    glog.Infof("Adding pods to evict: %v is later than %v + %v", decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout)
                }
            }
            if lastReadyCondition.Status == api.ConditionUnknown &&
                decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout-gracePeriod)) {
                if nc.podEvictor.AddNodeToEvict(node.Name) {
                    glog.Infof("Adding pods to evict (status unknown): %v is later than %v + %v", decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod)
                }
            }
            if lastReadyCondition.Status == api.ConditionTrue {
                if nc.podEvictor.RemoveNodeToEvict(node.Name) {
                    glog.Infof("Pods on %v won't be evicted", node.Name)
                }
            }

            // Report node event.
            if readyCondition.Status != api.ConditionTrue && lastReadyCondition.Status == api.ConditionTrue {
                nc.recordNodeStatusChange(node, "NodeNotReady")
            }

            // Check with the cloud provider to see if the node still exists. If it
            // doesn't, delete the node and all pods scheduled on the node.
            if readyCondition.Status != api.ConditionTrue && nc.cloud != nil {
                instances, ok := nc.cloud.Instances()
                if !ok {
                    glog.Errorf("%v", ErrCloudInstance)
                    continue
                }
                if _, err := instances.ExternalID(node.Name); err != nil && err == cloudprovider.InstanceNotFound {
                    glog.Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
                    nc.recordNodeEvent(node.Name, fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
                    if err := nc.kubeClient.Nodes().Delete(node.Name); err != nil {
                        glog.Errorf("Unable to delete node %s: %v", node.Name, err)
                        continue
                    }
                    if err := nc.deletePods(node.Name); err != nil {
                        glog.Errorf("Unable to delete pods from node %s: %v", node.Name, err)
                    }
                }
            }
        }
    }
    return nil
}

// deletePods deletes from the master all pods that are running on the given node.
func (nc *NodeController) deletePods(nodeID string) error {
    glog.V(2).Infof("Delete all pods from %v", nodeID)
    pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(),
        fields.OneTermEqualSelector(client.PodHost, nodeID))
    if err != nil {
        return err
    }
    nc.recordNodeEvent(nodeID, fmt.Sprintf("Deleting all Pods from Node %v.", nodeID))
    for _, pod := range pods.Items {
        // Defensive check, also needed for tests.
        if pod.Spec.NodeName != nodeID {
            continue
        }
        glog.V(2).Infof("Delete pod %v", pod.Name)
        nc.recorder.Eventf(&pod, "NodeControllerEviction", "Deleting Pod %s from Node %s", pod.Name, nodeID)
        if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
            glog.Errorf("Error deleting pod %v: %v", pod.Name, err)
        }
    }

    return nil
}

// getCondition returns a condition object for the specific condition
// type, or nil if the condition is not set.
func (nc *NodeController) getCondition(status *api.NodeStatus, conditionType api.NodeConditionType) *api.NodeCondition {
    if status == nil {
        return nil
    }
    for i := range status.Conditions {
        if status.Conditions[i].Type == conditionType {
            return &status.Conditions[i]
        }
    }
    return nil
}
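A minimal wiring sketch for the controller above (not part of this commit); the kubeClient value, rate-limiter constructor and settings, and all periods are assumptions chosen for illustration:

    evictor := NewPodEvictor(util.NewTokenBucketRateLimiter(0.1, 10)) // ~1 node per 10s, if available
    nc := NewNodeController(nil /* no cloud provider */, kubeClient, 10, 5*time.Minute, evictor,
        40*time.Second, 60*time.Second, 5*time.Second, nil, false)
    nc.Run(5 * time.Second)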
612
pkg/controller/node/nodecontroller_test.go
Normal file
@@ -0,0 +1,612 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodecontroller

import (
    "errors"
    "fmt"
    "sort"
    "sync"
    "testing"
    "time"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
)

const (
    testNodeMonitorGracePeriod = 40 * time.Second
    testNodeStartupGracePeriod = 60 * time.Second
    testNodeMonitorPeriod      = 5 * time.Second
)

// FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface. It
// allows test cases to have fine-grained control over mock behaviors. We also need
// PodsInterface and PodInterface to test list & delete pods, which is implemented in
// the embedded client.Fake field.
type FakeNodeHandler struct {
    *testclient.Fake

    // Input: Hooks determine if request is valid or not
    CreateHook func(*FakeNodeHandler, *api.Node) bool
    Existing   []*api.Node

    // Output
    CreatedNodes        []*api.Node
    DeletedNodes        []*api.Node
    UpdatedNodes        []*api.Node
    UpdatedNodeStatuses []*api.Node
    RequestCount        int

    // Synchronization
    createLock sync.Mutex
}

func (c *FakeNodeHandler) Nodes() client.NodeInterface {
    return c
}

func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
    m.createLock.Lock()
    defer func() {
        m.RequestCount++
        m.createLock.Unlock()
    }()
    for _, n := range m.Existing {
        if n.Name == node.Name {
            return nil, apierrors.NewAlreadyExists("Minion", node.Name)
        }
    }
    if m.CreateHook == nil || m.CreateHook(m, node) {
        nodeCopy := *node
        m.CreatedNodes = append(m.CreatedNodes, &nodeCopy)
        return node, nil
    } else {
        return nil, errors.New("Create error.")
    }
}

func (m *FakeNodeHandler) Get(name string) (*api.Node, error) {
    return nil, nil
}

func (m *FakeNodeHandler) List(label labels.Selector, field fields.Selector) (*api.NodeList, error) {
    defer func() { m.RequestCount++ }()
    var nodes []*api.Node
    for i := 0; i < len(m.UpdatedNodes); i++ {
        if !contains(m.UpdatedNodes[i], m.DeletedNodes) {
            nodes = append(nodes, m.UpdatedNodes[i])
        }
    }
    for i := 0; i < len(m.Existing); i++ {
        if !contains(m.Existing[i], m.DeletedNodes) && !contains(m.Existing[i], nodes) {
            nodes = append(nodes, m.Existing[i])
        }
    }
    for i := 0; i < len(m.CreatedNodes); i++ {
        if !contains(m.CreatedNodes[i], m.DeletedNodes) && !contains(m.CreatedNodes[i], nodes) {
            nodes = append(nodes, m.CreatedNodes[i])
        }
    }
    nodeList := &api.NodeList{}
    for _, node := range nodes {
        nodeList.Items = append(nodeList.Items, *node)
    }
    return nodeList, nil
}

func (m *FakeNodeHandler) Delete(id string) error {
    m.DeletedNodes = append(m.DeletedNodes, newNode(id))
    m.RequestCount++
    return nil
}

func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
    nodeCopy := *node
    m.UpdatedNodes = append(m.UpdatedNodes, &nodeCopy)
    m.RequestCount++
    return node, nil
}

func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error) {
    nodeCopy := *node
    m.UpdatedNodeStatuses = append(m.UpdatedNodeStatuses, &nodeCopy)
    m.RequestCount++
    return node, nil
}

func (m *FakeNodeHandler) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
    return nil, nil
}

func TestMonitorNodeStatusEvictPods(t *testing.T) {
    fakeNow := util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
    evictionTimeout := 10 * time.Minute

    table := []struct {
        fakeNodeHandler   *FakeNodeHandler
        timeToPass        time.Duration
        newNodeStatus     api.NodeStatus
        expectedEvictPods bool
        description       string
    }{
        // Node created recently, with no status (happens only at cluster startup).
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: fakeNow,
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            timeToPass:        0,
            newNodeStatus:     api.NodeStatus{},
            expectedEvictPods: false,
            description:       "Node created recently, with no status.",
        },
        // Node created long time ago, and kubelet posted NotReady for a short period of time.
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },
                        Status: api.NodeStatus{
                            Conditions: []api.NodeCondition{
                                {
                                    Type:               api.NodeReady,
                                    Status:             api.ConditionFalse,
                                    LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                    LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                },
                            },
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            timeToPass: evictionTimeout,
            newNodeStatus: api.NodeStatus{
                Conditions: []api.NodeCondition{
                    {
                        Type:   api.NodeReady,
                        Status: api.ConditionFalse,
                        // Node status has just been updated, and is NotReady for 10min.
                        LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC),
                        LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                    },
                },
            },
            expectedEvictPods: false,
            description:       "Node created long time ago, and kubelet posted NotReady for a short period of time.",
        },
        // Node created long time ago, and kubelet posted NotReady for a long period of time.
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },
                        Status: api.NodeStatus{
                            Conditions: []api.NodeCondition{
                                {
                                    Type:               api.NodeReady,
                                    Status:             api.ConditionFalse,
                                    LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                    LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                },
                            },
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            timeToPass: time.Hour,
            newNodeStatus: api.NodeStatus{
                Conditions: []api.NodeCondition{
                    {
                        Type:   api.NodeReady,
                        Status: api.ConditionFalse,
                        // Node status has just been updated, and is NotReady for 1hr.
                        LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC),
                        LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                    },
                },
            },
            expectedEvictPods: true,
            description:       "Node created long time ago, and kubelet posted NotReady for a long period of time.",
        },
        // Node created long time ago, node controller posted Unknown for a short period of time.
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },
                        Status: api.NodeStatus{
                            Conditions: []api.NodeCondition{
                                {
                                    Type:               api.NodeReady,
                                    Status:             api.ConditionUnknown,
                                    LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                    LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                },
                            },
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            timeToPass: evictionTimeout - testNodeMonitorGracePeriod,
            newNodeStatus: api.NodeStatus{
                Conditions: []api.NodeCondition{
                    {
                        Type:   api.NodeReady,
                        Status: api.ConditionUnknown,
                        // Node status was updated by nodecontroller 10min ago
                        LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                        LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                    },
                },
            },
            expectedEvictPods: false,
            description:       "Node created long time ago, node controller posted Unknown for a short period of time.",
        },
        // Node created long time ago, node controller posted Unknown for a long period of time.
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },
                        Status: api.NodeStatus{
                            Conditions: []api.NodeCondition{
                                {
                                    Type:               api.NodeReady,
                                    Status:             api.ConditionUnknown,
                                    LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                    LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                },
                            },
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            timeToPass: 60 * time.Minute,
            newNodeStatus: api.NodeStatus{
                Conditions: []api.NodeCondition{
                    {
                        Type:   api.NodeReady,
                        Status: api.ConditionUnknown,
                        // Node status was updated by nodecontroller 1hr ago
                        LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                        LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                    },
                },
            },
            expectedEvictPods: true,
            description:       "Node created long time ago, node controller posted Unknown for a long period of time.",
        },
    }

    for _, item := range table {
        podEvictor := NewPodEvictor(util.NewFakeRateLimiter())
        nodeController := NewNodeController(nil, item.fakeNodeHandler, 10,
            evictionTimeout, podEvictor, testNodeMonitorGracePeriod,
            testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
        nodeController.now = func() util.Time { return fakeNow }
        if err := nodeController.monitorNodeStatus(); err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        if item.timeToPass > 0 {
            nodeController.now = func() util.Time { return util.Time{Time: fakeNow.Add(item.timeToPass)} }
            item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
        }
        if err := nodeController.monitorNodeStatus(); err != nil {
            t.Errorf("unexpected error: %v", err)
        }

        podEvictor.TryEvict(func(nodeName string) { nodeController.deletePods(nodeName) })
        podEvicted := false
        for _, action := range item.fakeNodeHandler.Actions() {
            if action.Action == "delete-pod" {
                podEvicted = true
            }
        }

        if item.expectedEvictPods != podEvicted {
            t.Errorf("expected pod eviction: %+v, got %+v for %+v", item.expectedEvictPods,
                podEvicted, item.description)
        }
    }
}

func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
    fakeNow := util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
    table := []struct {
        fakeNodeHandler      *FakeNodeHandler
        timeToPass           time.Duration
        newNodeStatus        api.NodeStatus
        expectedEvictPods    bool
        expectedRequestCount int
        expectedNodes        []*api.Node
    }{
        // Node created long time ago, without status:
        // Expect Unknown status posted from node controller.
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            expectedRequestCount: 2, // List+Update
            expectedNodes: []*api.Node{
                {
                    ObjectMeta: api.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                    },
                    Status: api.NodeStatus{
                        Conditions: []api.NodeCondition{
                            {
                                Type:               api.NodeReady,
                                Status:             api.ConditionUnknown,
                                Reason:             "Kubelet never posted node status.",
                                LastHeartbeatTime:  util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                                LastTransitionTime: fakeNow,
                            },
                        },
                    },
                },
            },
        },
        // Node created recently, without status.
        // Expect no action from node controller (within startup grace period).
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: fakeNow,
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            expectedRequestCount: 1, // List
            expectedNodes:        nil,
        },
        // Node created long time ago, with status updated by kubelet exceeding grace period.
        // Expect Unknown status posted from node controller.
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },
                        Status: api.NodeStatus{
                            Conditions: []api.NodeCondition{
                                {
                                    Type:   api.NodeReady,
                                    Status: api.ConditionTrue,
                                    // Node status hasn't been updated for 1hr.
                                    LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                    LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                },
                            },
                            Capacity: api.ResourceList{
                                api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
                                api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
                            },
                        },
                        Spec: api.NodeSpec{
                            ExternalID: "node0",
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            expectedRequestCount: 3, // (List+)List+Update
            timeToPass:           time.Hour,
            newNodeStatus: api.NodeStatus{
                Conditions: []api.NodeCondition{
                    {
                        Type:   api.NodeReady,
                        Status: api.ConditionTrue,
                        // Node status hasn't been updated for 1hr.
                        LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                        LastTransitionTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                    },
                },
                Capacity: api.ResourceList{
                    api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
                    api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
                },
            },
            expectedNodes: []*api.Node{
                {
                    ObjectMeta: api.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                    },
                    Status: api.NodeStatus{
                        Conditions: []api.NodeCondition{
                            {
                                Type:               api.NodeReady,
                                Status:             api.ConditionUnknown,
                                Reason:             "Kubelet stopped posting node status.",
                                LastHeartbeatTime:  util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
                                LastTransitionTime: util.Time{util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
                            },
                        },
                        Capacity: api.ResourceList{
                            api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
                            api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
                        },
                    },
                    Spec: api.NodeSpec{
                        ExternalID: "node0",
                    },
                },
            },
        },
        // Node created long time ago, with status updated recently.
        // Expect no action from node controller (within monitor grace period).
        {
            fakeNodeHandler: &FakeNodeHandler{
                Existing: []*api.Node{
                    {
                        ObjectMeta: api.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },
                        Status: api.NodeStatus{
                            Conditions: []api.NodeCondition{
                                {
                                    Type:   api.NodeReady,
                                    Status: api.ConditionTrue,
                                    // Node status has just been updated.
                                    LastHeartbeatTime:  fakeNow,
                                    LastTransitionTime: fakeNow,
                                },
                            },
                            Capacity: api.ResourceList{
                                api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
                                api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
                            },
                        },
                        Spec: api.NodeSpec{
                            ExternalID: "node0",
                        },
                    },
                },
                Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
            },
            expectedRequestCount: 1, // List
            expectedNodes:        nil,
        },
    }

    for _, item := range table {
        nodeController := NewNodeController(nil, item.fakeNodeHandler, 10, 5*time.Minute, NewPodEvictor(util.NewFakeRateLimiter()),
            testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
        nodeController.now = func() util.Time { return fakeNow }
        if err := nodeController.monitorNodeStatus(); err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        if item.timeToPass > 0 {
            nodeController.now = func() util.Time { return util.Time{Time: fakeNow.Add(item.timeToPass)} }
            item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
            if err := nodeController.monitorNodeStatus(); err != nil {
                t.Errorf("unexpected error: %v", err)
            }
        }
        if item.expectedRequestCount != item.fakeNodeHandler.RequestCount {
            t.Errorf("expected %v call, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
        }
        if len(item.fakeNodeHandler.UpdatedNodes) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) {
            t.Errorf("expected nodes %+v, got %+v", item.expectedNodes[0],
                item.fakeNodeHandler.UpdatedNodes[0])
        }
    }
}

func newNode(name string) *api.Node {
    return &api.Node{
        ObjectMeta: api.ObjectMeta{Name: name},
        Spec: api.NodeSpec{
            ExternalID: name,
        },
        Status: api.NodeStatus{
            Capacity: api.ResourceList{
                api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
                api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
            },
        },
    }
}

func newPod(name, host string) *api.Pod {
    return &api.Pod{ObjectMeta: api.ObjectMeta{Name: name}, Spec: api.PodSpec{NodeName: host}}
}

func sortedNodeNames(nodes []*api.Node) []string {
    nodeNames := []string{}
    for _, node := range nodes {
        nodeNames = append(nodeNames, node.Name)
    }
    sort.Strings(nodeNames)
    return nodeNames
}

func sortedNodeAddresses(nodes []*api.Node) []string {
    nodeAddresses := []string{}
    for _, node := range nodes {
        for _, addr := range node.Status.Addresses {
            nodeAddresses = append(nodeAddresses, addr.Address)
        }
    }
    sort.Strings(nodeAddresses)
    return nodeAddresses
}

func sortedNodeExternalIDs(nodes []*api.Node) []string {
    nodeExternalIDs := []string{}
    for _, node := range nodes {
        nodeExternalIDs = append(nodeExternalIDs, node.Spec.ExternalID)
    }
    sort.Strings(nodeExternalIDs)
    return nodeExternalIDs
}

func contains(node *api.Node, nodes []*api.Node) bool {
    for i := 0; i < len(nodes); i++ {
        if node.Name == nodes[i].Name {
            return true
        }
    }
    return false
}
129
pkg/controller/node/podevictor.go
Normal file
@@ -0,0 +1,129 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodecontroller

import (
    "sync"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"

    "github.com/golang/glog"
)

// UniqueQueue is a FIFO queue which additionally guarantees that any element
// can be added only once until it is removed.
type UniqueQueue struct {
    lock  sync.Mutex
    queue []string
    set   util.StringSet
}

// PodEvictor is the entity responsible for evicting Pods from inserted Nodes. It uses a
// RateLimiter to avoid evicting everything at once. Note that we rate limit eviction of
// Nodes, not of individual Pods.
type PodEvictor struct {
    queue                   UniqueQueue
    deletingPodsRateLimiter util.RateLimiter
}

// Add adds a new value to the queue if it wasn't added before, or was explicitly removed by a
// Remove call. It returns true if the value was added.
func (q *UniqueQueue) Add(value string) bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    if !q.set.Has(value) {
        q.queue = append(q.queue, value)
        q.set.Insert(value)
        return true
    } else {
        return false
    }
}

// Remove removes the value from the queue, so that a Get() call won't return it, and allows a
// subsequent addition of the given value. If the value is not present it does nothing and
// returns false.
func (q *UniqueQueue) Remove(value string) bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    q.set.Delete(value)
    for i, val := range q.queue {
        if val == value {
            // Splice the element out; this covers the first, middle, and last positions.
            q.queue = append(q.queue[:i], q.queue[i+1:]...)
            return true
        }
    }
    return false
}

// Get returns the oldest added value that wasn't returned yet.
func (q *UniqueQueue) Get() (string, bool) {
    q.lock.Lock()
    defer q.lock.Unlock()
    if len(q.queue) == 0 {
        return "", false
    }

    result := q.queue[0]
    q.queue = q.queue[1:]
    return result, true
}

// NewPodEvictor creates a new PodEvictor which will use the given RateLimiter to oversee eviction.
func NewPodEvictor(deletingPodsRateLimiter util.RateLimiter) *PodEvictor {
    return &PodEvictor{
        queue: UniqueQueue{
            queue: make([]string, 0),
            set:   util.NewStringSet(),
        },
        deletingPodsRateLimiter: deletingPodsRateLimiter,
    }
}

// TryEvict tries to evict all Pods from previously inserted Nodes. It ends prematurely if the
// RateLimiter forbids any eviction. Each Node is processed only once, as long as it's not
// Removed, i.e. calling AddNodeToEvict multiple times does not result in multiple evictions as
// long as RemoveNodeToEvict is not called.
func (pe *PodEvictor) TryEvict(delFunc func(string)) {
    val, ok := pe.queue.Get()
    for ok {
        if pe.deletingPodsRateLimiter.CanAccept() {
            glog.Infof("PodEvictor is evicting Pods on Node: %v", val)
            delFunc(val)
        } else {
            glog.V(1).Info("PodEvictor is rate limited.")
            break
        }
        val, ok = pe.queue.Get()
    }
}

// AddNodeToEvict adds a Node to the Evictor to be processed later. It won't add the same Node
// a second time if it was already added and not removed.
func (pe *PodEvictor) AddNodeToEvict(nodeName string) bool {
    return pe.queue.Add(nodeName)
}

// RemoveNodeToEvict removes a Node from the Evictor. The Node won't be processed until added again.
func (pe *PodEvictor) RemoveNodeToEvict(nodeName string) bool {
    return pe.queue.Remove(nodeName)
}
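A short usage sketch for PodEvictor (not part of this commit); the rate-limiter constructor and its values are assumptions, and the tests below use util.NewFakeRateLimiter instead:

    evictor := NewPodEvictor(util.NewTokenBucketRateLimiter(0.1, 10))
    evictor.AddNodeToEvict("node-a")
    evictor.AddNodeToEvict("node-a") // no-op: already queued and not yet removed
    evictor.TryEvict(func(nodeName string) {
        // delete the pods bound to nodeName here
    })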
146
pkg/controller/node/podevictor_test.go
Normal file
@@ -0,0 +1,146 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodecontroller

import (
    "testing"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

func CheckQueueEq(lhs, rhs []string) bool {
    for i := 0; i < len(lhs); i++ {
        if rhs[i] != lhs[i] {
            return false
        }
    }
    return true
}

func CheckSetEq(lhs, rhs util.StringSet) bool {
    return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}

func TestAddNode(t *testing.T) {
    evictor := NewPodEvictor(util.NewFakeRateLimiter())
    evictor.AddNodeToEvict("first")
    evictor.AddNodeToEvict("second")
    evictor.AddNodeToEvict("third")

    queuePattern := []string{"first", "second", "third"}
    if len(evictor.queue.queue) != len(queuePattern) {
        t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
    }
    if !CheckQueueEq(queuePattern, evictor.queue.queue) {
        t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
    }

    setPattern := util.NewStringSet("first", "second", "third")
    if len(evictor.queue.set) != len(setPattern) {
        t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
    }
    if !CheckSetEq(setPattern, evictor.queue.set) {
        t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
    }
}

func TestDelNode(t *testing.T) {
    evictor := NewPodEvictor(util.NewFakeRateLimiter())
    evictor.AddNodeToEvict("first")
    evictor.AddNodeToEvict("second")
    evictor.AddNodeToEvict("third")
    evictor.RemoveNodeToEvict("first")

    queuePattern := []string{"second", "third"}
    if len(evictor.queue.queue) != len(queuePattern) {
        t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
    }
    if !CheckQueueEq(queuePattern, evictor.queue.queue) {
        t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
    }

    setPattern := util.NewStringSet("second", "third")
    if len(evictor.queue.set) != len(setPattern) {
        t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
    }
    if !CheckSetEq(setPattern, evictor.queue.set) {
        t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
    }

    evictor = NewPodEvictor(util.NewFakeRateLimiter())
    evictor.AddNodeToEvict("first")
    evictor.AddNodeToEvict("second")
    evictor.AddNodeToEvict("third")
    evictor.RemoveNodeToEvict("second")

    queuePattern = []string{"first", "third"}
    if len(evictor.queue.queue) != len(queuePattern) {
        t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
    }
    if !CheckQueueEq(queuePattern, evictor.queue.queue) {
        t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
    }

    setPattern = util.NewStringSet("first", "third")
    if len(evictor.queue.set) != len(setPattern) {
        t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
    }
    if !CheckSetEq(setPattern, evictor.queue.set) {
        t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
    }

    evictor = NewPodEvictor(util.NewFakeRateLimiter())
    evictor.AddNodeToEvict("first")
    evictor.AddNodeToEvict("second")
    evictor.AddNodeToEvict("third")
    evictor.RemoveNodeToEvict("third")

    queuePattern = []string{"first", "second"}
    if len(evictor.queue.queue) != len(queuePattern) {
        t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
    }
    if !CheckQueueEq(queuePattern, evictor.queue.queue) {
        t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
    }

    setPattern = util.NewStringSet("first", "second")
    if len(evictor.queue.set) != len(setPattern) {
        t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
    }
    if !CheckSetEq(setPattern, evictor.queue.set) {
        t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern)
    }
}

func TestEvictNode(t *testing.T) {
    evictor := NewPodEvictor(util.NewFakeRateLimiter())
    evictor.AddNodeToEvict("first")
    evictor.AddNodeToEvict("second")
    evictor.AddNodeToEvict("third")
    evictor.RemoveNodeToEvict("second")

    deletedMap := util.NewStringSet()
    evictor.TryEvict(func(nodeName string) { deletedMap.Insert(nodeName) })

    setPattern := util.NewStringSet("first", "third")
    if len(deletedMap) != len(setPattern) {
        t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
    }
    if !CheckSetEq(setPattern, deletedMap) {
        t.Errorf("Invalid map. Got %v, expected %v", deletedMap, setPattern)
    }
}
@@ -16,4 +16,4 @@ limitations under the License.
 
 // Package replication contains logic for watching and synchronizing
 // replication controllers.
-package replication
+package replicationcontroller
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package replication
+package replicationcontroller
 
 import (
     "reflect"
@@ -63,6 +63,8 @@ const (
 
 // ReplicationManager is responsible for synchronizing ReplicationController objects stored
 // in the system with actual running pods.
+// TODO: this really should be called ReplicationController. The only reason why it's a Manager
+// is to distinguish this type from API object "ReplicationController". We should fix this.
 type ReplicationManager struct {
     kubeClient client.Interface
     podControl controller.PodControlInterface
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package replication
+package replicationcontroller
 
 import (
     "fmt"
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package replication
+package replicationcontroller
 
 import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
18
pkg/controller/resourcequota/doc.go
Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package resourcequotacontroller contains a controller that makes resource quota usage observations.
package resourcequotacontroller

265
pkg/controller/resourcequota/resource_quota_controller.go
Normal file
@@ -0,0 +1,265 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequotacontroller

import (
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/golang/glog"
)

// ResourceQuotaController is responsible for tracking quota usage status in the system
type ResourceQuotaController struct {
	kubeClient client.Interface
	syncTime   <-chan time.Time

	// To allow injection of syncUsage for testing.
	syncHandler func(quota api.ResourceQuota) error
}

// NewResourceQuotaController creates a new ResourceQuotaController
func NewResourceQuotaController(kubeClient client.Interface) *ResourceQuotaController {
	rm := &ResourceQuotaController{
		kubeClient: kubeClient,
	}
	// set the synchronization handler
	rm.syncHandler = rm.syncResourceQuota
	return rm
}

// Run begins watching and syncing.
func (rm *ResourceQuotaController) Run(period time.Duration) {
	rm.syncTime = time.Tick(period)
	go util.Forever(func() { rm.synchronize() }, period)
}

func (rm *ResourceQuotaController) synchronize() {
	var resourceQuotas []api.ResourceQuota
	list, err := rm.kubeClient.ResourceQuotas(api.NamespaceAll).List(labels.Everything())
	if err != nil {
		glog.Errorf("Synchronization error: %v (%#v)", err, err)
	}
	resourceQuotas = list.Items
	for ix := range resourceQuotas {
		glog.V(4).Infof("periodic sync of %v/%v", resourceQuotas[ix].Namespace, resourceQuotas[ix].Name)
		err := rm.syncHandler(resourceQuotas[ix])
		if err != nil {
			glog.Errorf("Error synchronizing: %v", err)
		}
	}
}
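
// An illustrative wiring sketch (not part of this file; it assumes the caller
// already holds a configured client.Interface named kubeClient):
//
//	controller := NewResourceQuotaController(kubeClient)
//	controller.Run(10 * time.Second) // re-observe every quota every 10s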

// FilterQuotaPods eliminates pods that no longer have a cost against the quota:
// pods with a restart policy of always are always returned,
// pods in a failed state with a restart policy of on failure are always returned,
// and pods that are in neither a success state nor a failure state are included in quota.
func FilterQuotaPods(pods []api.Pod) []*api.Pod {
	var result []*api.Pod
	for i := range pods {
		value := &pods[i]
		// a pod that has a restart policy of always counts against usage no matter its state
		if value.Spec.RestartPolicy == api.RestartPolicyAlways {
			result = append(result, value)
			continue
		}
		// a failed pod with a restart policy of on failure will count against usage
		if api.PodFailed == value.Status.Phase &&
			value.Spec.RestartPolicy == api.RestartPolicyOnFailure {
			result = append(result, value)
			continue
		}
		// if the pod is not succeeded or failed, then we count it against quota
		if api.PodSucceeded != value.Status.Phase &&
			api.PodFailed != value.Status.Phase {
			result = append(result, value)
			continue
		}
	}
	return result
}
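
// Illustrative usage (a sketch; podList is assumed to be the result of a pod
// List call such as the one in syncResourceQuota below):
//
//	activePods := FilterQuotaPods(podList.Items)
//	podCount := resource.NewQuantity(int64(len(activePods)), resource.DecimalSI)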

// syncResourceQuota runs a complete sync of current status
func (rm *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (err error) {
	// quota is dirty if any part of spec hard limits differs from the status hard limits
	dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)

	// dirty tracks if the usage status differs from the previous sync;
	// if so, we send a new usage with the latest status.
	// If this is our first sync, it will be dirty by default, since we need to track usage.
	dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)

	// Create a usage object that is based on the quota resource version
	usage := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name:            quota.Name,
			Namespace:       quota.Namespace,
			ResourceVersion: quota.ResourceVersion,
			Labels:          quota.Labels,
			Annotations:     quota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{},
			Used: api.ResourceList{},
		},
	}

	// set the hard values supported on the quota
	for k, v := range quota.Spec.Hard {
		usage.Status.Hard[k] = *v.Copy()
	}
	// set any last known observed status values for usage
	for k, v := range quota.Status.Used {
		usage.Status.Used[k] = *v.Copy()
	}

	set := map[api.ResourceName]bool{}
	for k := range usage.Status.Hard {
		set[k] = true
	}

	pods := &api.PodList{}
	if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
		pods, err = rm.kubeClient.Pods(usage.Namespace).List(labels.Everything(), fields.Everything())
		if err != nil {
			return err
		}
	}

	filteredPods := FilterQuotaPods(pods.Items)

	// iterate over each resource, and update observation
	for k := range usage.Status.Hard {
		// look if there is a used value; if none, we are definitely dirty
		prevQuantity, found := usage.Status.Used[k]
		if !found {
			dirty = true
		}

		var value *resource.Quantity

		switch k {
		case api.ResourcePods:
			value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
		case api.ResourceMemory:
			val := int64(0)
			for _, pod := range filteredPods {
				val = val + PodMemory(pod).Value()
			}
			value = resource.NewQuantity(int64(val), resource.DecimalSI)
		case api.ResourceCPU:
			val := int64(0)
			for _, pod := range filteredPods {
				val = val + PodCPU(pod).MilliValue()
			}
			value = resource.NewMilliQuantity(int64(val), resource.DecimalSI)
		case api.ResourceServices:
			items, err := rm.kubeClient.Services(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceReplicationControllers:
			items, err := rm.kubeClient.ReplicationControllers(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceQuotas:
			items, err := rm.kubeClient.ResourceQuotas(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceSecrets:
			items, err := rm.kubeClient.Secrets(usage.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourcePersistentVolumeClaims:
			items, err := rm.kubeClient.PersistentVolumeClaims(usage.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		}

		// ignore fields we do not understand (assume another controller is tracking it)
		if value != nil {
			// see if the value has changed
			dirty = dirty || (value.Value() != prevQuantity.Value())
			// just update the value
			usage.Status.Used[k] = *value
		}
	}

	// update the usage only if it changed
	if dirty {
		_, err = rm.kubeClient.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
		return err
	}
	return nil
}
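
// A sketch of the resulting behavior: when a quota's status already mirrors
// its spec and no observed value changed, dirty stays false and the sync
// costs only List calls, with no UpdateStatus write:
//
//	err := rm.syncResourceQuota(quota) // steady state: no API write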

// PodCPU computes the total cpu usage of a pod (the sum of its containers' CPU limits)
func PodCPU(pod *api.Pod) *resource.Quantity {
	val := int64(0)
	for j := range pod.Spec.Containers {
		val = val + pod.Spec.Containers[j].Resources.Limits.Cpu().MilliValue()
	}
	return resource.NewMilliQuantity(int64(val), resource.DecimalSI)
}

// IsPodCPUUnbounded returns true if the cpu use is unbounded for any container in pod
func IsPodCPUUnbounded(pod *api.Pod) bool {
	for j := range pod.Spec.Containers {
		container := pod.Spec.Containers[j]
		if container.Resources.Limits.Cpu().MilliValue() == int64(0) {
			return true
		}
	}
	return false
}

// IsPodMemoryUnbounded returns true if the memory use is unbounded for any container in pod
func IsPodMemoryUnbounded(pod *api.Pod) bool {
	for j := range pod.Spec.Containers {
		container := pod.Spec.Containers[j]
		if container.Resources.Limits.Memory().Value() == int64(0) {
			return true
		}
	}
	return false
}

// PodMemory computes the memory usage of a pod (the sum of its containers' memory limits)
func PodMemory(pod *api.Pod) *resource.Quantity {
	val := int64(0)
	for j := range pod.Spec.Containers {
		val = val + pod.Spec.Containers[j].Resources.Limits.Memory().Value()
	}
	return resource.NewQuantity(int64(val), resource.DecimalSI)
}
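
// Illustrative use of the helpers above (a sketch; pod is assumed to be an
// *api.Pod whose containers declare resource limits):
//
//	cpuMilli := PodCPU(pod).MilliValue() // sum of container CPU limits
//	memBytes := PodMemory(pod).Value()   // sum of container memory limits
//	bounded := !IsPodCPUUnbounded(pod) && !IsPodMemoryUnbounded(pod)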

330
pkg/controller/resourcequota/resource_quota_controller_test.go
Normal file
@@ -0,0 +1,330 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequotacontroller

import (
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

func getResourceRequirements(cpu, memory string) api.ResourceRequirements {
	res := api.ResourceRequirements{}
	res.Limits = api.ResourceList{}
	if cpu != "" {
		res.Limits[api.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res.Limits[api.ResourceMemory] = resource.MustParse(memory)
	}

	return res
}

func TestFilterQuotaPods(t *testing.T) {
	pods := []api.Pod{
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-running"},
			Status:     api.PodStatus{Phase: api.PodRunning},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-pending"},
			Status:     api.PodStatus{Phase: api.PodPending},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-succeeded"},
			Status:     api.PodStatus{Phase: api.PodSucceeded},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-unknown"},
			Status:     api.PodStatus{Phase: api.PodUnknown},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
			Status:     api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-always"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyAlways,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-on-failure"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyOnFailure,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-never"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyNever,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
	}
	expectedResults := util.NewStringSet("pod-running",
		"pod-pending", "pod-unknown", "pod-failed-with-restart-always",
		"pod-failed-with-restart-on-failure")

	actualResults := util.StringSet{}
	result := FilterQuotaPods(pods)
	for i := range result {
		actualResults.Insert(result[i].Name)
	}

	if len(expectedResults) != len(actualResults) || !actualResults.HasAll(expectedResults.List()...) {
		t.Errorf("Expected results %v, Actual results %v", expectedResults, actualResults)
	}
}

func TestSyncResourceQuota(t *testing.T) {
	podList := api.PodList{
		Items: []api.Pod{
			{
				ObjectMeta: api.ObjectMeta{Name: "pod-running"},
				Status:     api.PodStatus{Phase: api.PodRunning},
				Spec: api.PodSpec{
					Volumes:    []api.Volume{{Name: "vol"}},
					Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
				},
			},
			{
				ObjectMeta: api.ObjectMeta{Name: "pod-running-2"},
				Status:     api.PodStatus{Phase: api.PodRunning},
				Spec: api.PodSpec{
					Volumes:    []api.Volume{{Name: "vol"}},
					Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
				},
			},
			{
				ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
				Status:     api.PodStatus{Phase: api.PodFailed},
				Spec: api.PodSpec{
					Volumes:    []api.Volume{{Name: "vol"}},
					Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
				},
			},
		},
	}
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("3"),
				api.ResourceMemory: resource.MustParse("100Gi"),
				api.ResourcePods:   resource.MustParse("5"),
			},
		},
	}
	expectedUsage := api.ResourceQuota{
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("3"),
				api.ResourceMemory: resource.MustParse("100Gi"),
				api.ResourcePods:   resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("200m"),
				api.ResourceMemory: resource.MustParse("2147483648"),
				api.ResourcePods:   resource.MustParse("2"),
			},
		},
	}

	kubeClient := testclient.NewSimpleFake(&podList, &quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient)
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	usage := kubeClient.Actions()[1].Value.(*api.ResourceQuota)

	// ensure hard and used limits are what we expected
	for k, v := range expectedUsage.Status.Hard {
		actual := usage.Status.Hard[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
	for k, v := range expectedUsage.Status.Used {
		actual := usage.Status.Used[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
}

func TestSyncResourceQuotaSpecChange(t *testing.T) {
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("3"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	expectedUsage := api.ResourceQuota{
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := testclient.NewSimpleFake(&quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient)
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	usage := kubeClient.Actions()[1].Value.(*api.ResourceQuota)

	// ensure hard and used limits are what we expected
	for k, v := range expectedUsage.Status.Hard {
		actual := usage.Status.Hard[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
	for k, v := range expectedUsage.Status.Used {
		actual := usage.Status.Used[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
}

func TestSyncResourceQuotaNoChange(t *testing.T) {
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := testclient.NewSimpleFake(&api.PodList{}, &quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient)
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	actions := kubeClient.Actions()
	if len(actions) != 1 || actions[0].Action != "list-pods" {
		t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", actions)
	}
}

func TestIsPodCPUUnbounded(t *testing.T) {
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "pod-running"},
		Status:     api.PodStatus{Phase: api.PodRunning},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "0")}},
		},
	}
	if IsPodCPUUnbounded(&pod) {
		t.Errorf("Expected false")
	}
	pod = api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "pod-running"},
		Status:     api.PodStatus{Phase: api.PodRunning},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "0")}},
		},
	}
	if !IsPodCPUUnbounded(&pod) {
		t.Errorf("Expected true")
	}

	pod.Spec.Containers[0].Resources = api.ResourceRequirements{}
	if !IsPodCPUUnbounded(&pod) {
		t.Errorf("Expected true")
	}
}

func TestIsPodMemoryUnbounded(t *testing.T) {
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "pod-running"},
		Status:     api.PodStatus{Phase: api.PodRunning},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "1Gi")}},
		},
	}
	if IsPodMemoryUnbounded(&pod) {
		t.Errorf("Expected false")
	}
	pod = api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "pod-running"},
		Status:     api.PodStatus{Phase: api.PodRunning},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "0")}},
		},
	}
	if !IsPodMemoryUnbounded(&pod) {
		t.Errorf("Expected true")
	}

	pod.Spec.Containers[0].Resources = api.ResourceRequirements{}
	if !IsPodMemoryUnbounded(&pod) {
		t.Errorf("Expected true")
	}
}

19
pkg/controller/route/doc.go
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package routecontroller contains code for syncing cloud routing rules with
// the list of registered nodes.
package routecontroller

128
pkg/controller/route/routecontroller.go
Normal file
@@ -0,0 +1,128 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package routecontroller

import (
	"fmt"
	"net"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/golang/glog"
)

type RouteController struct {
	routes      cloudprovider.Routes
	kubeClient  client.Interface
	clusterName string
	clusterCIDR *net.IPNet
}

func New(routes cloudprovider.Routes, kubeClient client.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
	return &RouteController{
		routes:      routes,
		kubeClient:  kubeClient,
		clusterName: clusterName,
		clusterCIDR: clusterCIDR,
	}
}
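
// An illustrative wiring sketch (assumes routes came from a cloud provider's
// Routes() support and kubeClient is a configured client.Interface):
//
//	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
//	rc := New(routes, kubeClient, "my-cluster", clusterCIDR)
//	rc.Run(10 * time.Second) // reconcile node routes every 10s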

func (rc *RouteController) Run(syncPeriod time.Duration) {
	go util.Forever(func() {
		if err := rc.reconcileNodeRoutes(); err != nil {
			glog.Errorf("Couldn't reconcile node routes: %v", err)
		}
	}, syncPeriod)
}

func (rc *RouteController) reconcileNodeRoutes() error {
	routeList, err := rc.routes.ListRoutes(rc.clusterName)
	if err != nil {
		return fmt.Errorf("error listing routes: %v", err)
	}
	// TODO (cjcullen): use pkg/controller/framework.NewInformer to watch this
	// and reduce the number of lists needed.
	nodeList, err := rc.kubeClient.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return fmt.Errorf("error listing nodes: %v", err)
	}
	return rc.reconcile(nodeList.Items, routeList)
}

func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.Route) error {
	// nodeCIDRs maps nodeName->nodeCIDR
	nodeCIDRs := make(map[string]string)
	// routeMap maps routeTargetInstance->route
	routeMap := make(map[string]*cloudprovider.Route)
	for _, route := range routes {
		routeMap[route.TargetInstance] = route
	}
	for _, node := range nodes {
		// Check if we have a route for this node w/ the correct CIDR.
		r := routeMap[node.Name]
		if r == nil || r.DestinationCIDR != node.Spec.PodCIDR {
			// If not, create the route.
			route := &cloudprovider.Route{
				TargetInstance:  node.Name,
				DestinationCIDR: node.Spec.PodCIDR,
			}
			nameHint := string(node.UID)
			go func(nameHint string, route *cloudprovider.Route) {
				if err := rc.routes.CreateRoute(rc.clusterName, nameHint, route); err != nil {
					glog.Errorf("Could not create route %s %s: %v", nameHint, route.DestinationCIDR, err)
				}
			}(nameHint, route)
		}
		nodeCIDRs[node.Name] = node.Spec.PodCIDR
	}
	for _, route := range routes {
		if rc.isResponsibleForRoute(route) {
			// Check if this route applies to a node we know about & has correct CIDR.
			if nodeCIDRs[route.TargetInstance] != route.DestinationCIDR {
				// Delete the route.
				go func(route *cloudprovider.Route) {
					if err := rc.routes.DeleteRoute(rc.clusterName, route); err != nil {
						glog.Errorf("Could not delete route %s %s: %v", route.Name, route.DestinationCIDR, err)
					}
				}(route)
			}
		}
	}
	return nil
}

func (rc *RouteController) isResponsibleForRoute(route *cloudprovider.Route) bool {
	_, cidr, err := net.ParseCIDR(route.DestinationCIDR)
	if err != nil {
		glog.Errorf("Ignoring route %s, unparsable CIDR: %v", route.Name, err)
		return false
	}
	// Not responsible if this route's CIDR is not within our clusterCIDR
	lastIP := make([]byte, len(cidr.IP))
	for i := range lastIP {
		lastIP[i] = cidr.IP[i] | ^cidr.Mask[i]
	}
	if !rc.clusterCIDR.Contains(cidr.IP) || !rc.clusterCIDR.Contains(lastIP) {
		return false
	}
	return true
}
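
// An illustrative walk-through of the containment test above (a sketch): with
// clusterCIDR 10.244.0.0/16, a route to 10.244.10.0/24 spans 10.244.10.0
// through the computed lastIP 10.244.10.255, both inside the cluster range,
// so the controller claims it; a route to 10.0.10.0/24 fails the first
// Contains check and is left alone.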

208
pkg/controller/route/routecontroller_test.go
Normal file
@@ -0,0 +1,208 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package routecontroller

import (
	"net"
	"testing"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake"
)

func TestIsResponsibleForRoute(t *testing.T) {
	myClusterName := "my-awesome-cluster"
	myClusterRoute := "my-awesome-cluster-12345678-90ab-cdef-1234-567890abcdef"
	testCases := []struct {
		clusterCIDR         string
		routeName           string
		routeCIDR           string
		expectedResponsible bool
	}{
		// Routes that belong to this cluster
		{"10.244.0.0/16", myClusterRoute, "10.244.0.0/24", true},
		{"10.244.0.0/16", myClusterRoute, "10.244.10.0/24", true},
		{"10.244.0.0/16", myClusterRoute, "10.244.255.0/24", true},
		{"10.244.0.0/14", myClusterRoute, "10.244.0.0/24", true},
		{"10.244.0.0/14", myClusterRoute, "10.247.255.0/24", true},
		// Routes that match our naming/tagging scheme, but are outside our cidr
		{"10.244.0.0/16", myClusterRoute, "10.224.0.0/24", false},
		{"10.244.0.0/16", myClusterRoute, "10.0.10.0/24", false},
		{"10.244.0.0/16", myClusterRoute, "10.255.255.0/24", false},
		{"10.244.0.0/14", myClusterRoute, "10.248.0.0/24", false},
		{"10.244.0.0/14", myClusterRoute, "10.243.255.0/24", false},
	}
	for i, testCase := range testCases {
		_, cidr, err := net.ParseCIDR(testCase.clusterCIDR)
		if err != nil {
			t.Errorf("%d. Error in test case: unparsable cidr %q", i, testCase.clusterCIDR)
		}
		rc := New(nil, nil, myClusterName, cidr)
		route := &cloudprovider.Route{
			Name:            testCase.routeName,
			TargetInstance:  "doesnt-matter-for-this-test",
			DestinationCIDR: testCase.routeCIDR,
		}
		if resp := rc.isResponsibleForRoute(route); resp != testCase.expectedResponsible {
			t.Errorf("%d. isResponsibleForRoute() = %t; want %t", i, resp, testCase.expectedResponsible)
		}
	}
}

func TestReconcile(t *testing.T) {
	cluster := "my-k8s"
	testCases := []struct {
		nodes          []api.Node
		initialRoutes  []*cloudprovider.Route
		expectedRoutes []*cloudprovider.Route
	}{
		// 2 nodes, routes already there
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
				{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
			},
			initialRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
				{cluster + "-02", "node-2", "10.120.1.0/24"},
			},
			expectedRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
				{cluster + "-02", "node-2", "10.120.1.0/24"},
			},
		},
		// 2 nodes, one route already there
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
				{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
			},
			initialRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
			},
			expectedRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
				{cluster + "-02", "node-2", "10.120.1.0/24"},
			},
		},
		// 2 nodes, no routes yet
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
				{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
			},
			initialRoutes: []*cloudprovider.Route{},
			expectedRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
				{cluster + "-02", "node-2", "10.120.1.0/24"},
			},
		},
		// 2 nodes, a few too many routes
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
				{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
			},
			initialRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
				{cluster + "-02", "node-2", "10.120.1.0/24"},
				{cluster + "-03", "node-3", "10.120.2.0/24"},
				{cluster + "-04", "node-4", "10.120.3.0/24"},
			},
			expectedRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
				{cluster + "-02", "node-2", "10.120.1.0/24"},
			},
		},
		// 2 nodes, 2 routes, but only 1 is right
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
				{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
			},
			initialRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
				{cluster + "-03", "node-3", "10.120.2.0/24"},
			},
			expectedRoutes: []*cloudprovider.Route{
				{cluster + "-01", "node-1", "10.120.0.0/24"},
				{cluster + "-02", "node-2", "10.120.1.0/24"},
			},
		},
	}
	for i, testCase := range testCases {
		cloud := &fake_cloud.FakeCloud{RouteMap: make(map[string]*fake_cloud.FakeRoute)}
		for _, route := range testCase.initialRoutes {
			fakeRoute := &fake_cloud.FakeRoute{}
			fakeRoute.ClusterName = cluster
			fakeRoute.Route = *route
			cloud.RouteMap[route.Name] = fakeRoute
		}
		routes, ok := cloud.Routes()
		if !ok {
			t.Error("Error in test: fake_cloud doesn't support Routes()")
		}
		_, cidr, _ := net.ParseCIDR("10.120.0.0/16")
		rc := New(routes, nil, cluster, cidr)
		if err := rc.reconcile(testCase.nodes, testCase.initialRoutes); err != nil {
			t.Errorf("%d. Error from rc.reconcile(): %v", i, err)
		}
		var finalRoutes []*cloudprovider.Route
		var err error
		timeoutChan := time.After(50 * time.Millisecond)
		tick := time.NewTicker(10 * time.Millisecond)
		defer tick.Stop()
	poll:
		for {
			select {
			case <-tick.C:
				if finalRoutes, err = routes.ListRoutes(cluster); err == nil && routeListEqual(finalRoutes, testCase.expectedRoutes) {
					break poll
				}
			case <-timeoutChan:
				t.Errorf("%d. rc.reconcile() = %v, routes:\n%v\nexpected: nil, routes:\n%v\n", i, err, flatten(finalRoutes), flatten(testCase.expectedRoutes))
				break poll
			}
		}
	}
}

func routeListEqual(list1, list2 []*cloudprovider.Route) bool {
	if len(list1) != len(list2) {
		return false
	}
	routeMap1 := make(map[string]*cloudprovider.Route)
	for _, route1 := range list1 {
		routeMap1[route1.Name] = route1
	}
	for _, route2 := range list2 {
		if route1, exists := routeMap1[route2.Name]; !exists || *route1 != *route2 {
			return false
		}
	}
	return true
}

func flatten(list []*cloudprovider.Route) []cloudprovider.Route {
	var structList []cloudprovider.Route
	for _, route := range list {
		structList = append(structList, *route)
	}
	return structList
}

19
pkg/controller/service/doc.go
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package servicecontroller contains code for syncing cloud load balancers
// with the service registry.
package servicecontroller

671
pkg/controller/service/servicecontroller.go
Normal file
@@ -0,0 +1,671 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package servicecontroller

import (
	"fmt"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/golang/glog"
)

const (
	workerGoroutines = 10

	// How long to wait before retrying the processing of a service change.
	// If this changes, the sleep in hack/jenkins/e2e.sh before downing a cluster
	// should be changed appropriately.
	processingRetryInterval = 5 * time.Second

	clientRetryCount    = 5
	clientRetryInterval = 5 * time.Second

	retryable    = true
	notRetryable = false
)

type cachedService struct {
	// The last-known state of the service
	lastState *api.Service
	// The state as successfully applied to the load balancer
	appliedState *api.Service

	// Ensures only one goroutine can operate on this service at any given time.
	mu sync.Mutex
}

type serviceCache struct {
	mu         sync.Mutex // protects serviceMap
	serviceMap map[string]*cachedService
}

type ServiceController struct {
	cloud            cloudprovider.Interface
	kubeClient       client.Interface
	clusterName      string
	balancer         cloudprovider.TCPLoadBalancer
	zone             cloudprovider.Zone
	cache            *serviceCache
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder
	nodeLister       cache.StoreToNodeLister
}

// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
	}
}
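
// An illustrative wiring sketch (assumes a cloudprovider.Interface and a real
// *client.Client, which Run below insists on):
//
//	sc := New(cloud, kubeClient, "my-cluster")
//	if err := sc.Run(5*time.Minute, 10*time.Second); err != nil {
//		glog.Fatalf("service controller failed to start: %v", err)
//	}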

// Run starts a background goroutine that watches for changes to services that
// have (or had) externalLoadBalancers=true and ensures that they have external
// load balancers created and deleted appropriately.
// serviceSyncPeriod controls how often we check the cluster's services to
// ensure that the correct external load balancers exist.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if external load balancers need to be updated to point to a new set.
//
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(serviceSyncPeriod, nodeSyncPeriod time.Duration) error {
	if err := s.init(); err != nil {
		return err
	}

	// We have to make this check because the ListWatch that we use in
	// WatchServices requires Client functions that aren't in the interface
	// for some reason.
	if _, ok := s.kubeClient.(*client.Client); !ok {
		return fmt.Errorf("ServiceController only works with real Client objects, but was passed something else satisfying the client Interface.")
	}

	// Get the currently existing set of services and then all future creates
	// and updates of services.
	// A delta compressor is needed for the DeltaFIFO queue because we only ever
	// care about the most recent state.
	serviceQueue := cache.NewDeltaFIFO(
		cache.MetaNamespaceKeyFunc,
		cache.DeltaCompressorFunc(func(d cache.Deltas) cache.Deltas {
			if len(d) == 0 {
				return d
			}
			return cache.Deltas{*d.Newest()}
		}),
		s.cache,
	)
	lw := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "services", api.NamespaceAll, fields.Everything())
	cache.NewReflector(lw, &api.Service{}, serviceQueue, serviceSyncPeriod).Run()
	for i := 0; i < workerGoroutines; i++ {
		go s.watchServices(serviceQueue)
	}

	nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "nodes", api.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &api.Node{}, s.nodeLister.Store, 0).Run()
	go s.nodeSyncLoop(nodeSyncPeriod)
	return nil
}

func (s *ServiceController) init() error {
	if s.cloud == nil {
		return fmt.Errorf("ServiceController should not be run without a cloudprovider.")
	}

	balancer, ok := s.cloud.TCPLoadBalancer()
	if !ok {
		return fmt.Errorf("the cloud provider does not support external TCP load balancers.")
	}
	s.balancer = balancer

	zones, ok := s.cloud.Zones()
	if !ok {
		return fmt.Errorf("the cloud provider does not support zone enumeration, which is required for creating external load balancers.")
	}
	zone, err := zones.GetZone()
	if err != nil {
		return fmt.Errorf("failed to get zone from cloud provider, will not be able to create external load balancers: %v", err)
	}
	s.zone = zone
	return nil
}

// Loop infinitely, processing all service updates provided by the queue.
func (s *ServiceController) watchServices(serviceQueue *cache.DeltaFIFO) {
	for {
		newItem := serviceQueue.Pop()
		deltas, ok := newItem.(cache.Deltas)
		if !ok {
			glog.Errorf("Received object from service watcher that wasn't Deltas: %+v", newItem)
		}
		delta := deltas.Newest()
		if delta == nil {
			glog.Errorf("Received nil delta from watcher queue.")
			continue
		}
		err, shouldRetry := s.processDelta(delta)
		if shouldRetry {
			// Add the failed service back to the queue so we'll retry it.
			glog.Errorf("Failed to process service delta. Retrying: %v", err)
			time.Sleep(processingRetryInterval)
			serviceQueue.AddIfNotPresent(deltas)
		} else if err != nil {
			util.HandleError(fmt.Errorf("Failed to process service delta. Not retrying: %v", err))
		}
	}
}

// Returns an error if processing the delta failed, along with a boolean
// indicator of whether the processing should be retried.
func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
	service, ok := delta.Object.(*api.Service)
	var namespacedName types.NamespacedName
	var cachedService *cachedService
	if !ok {
		// If the DeltaFIFO saw a key in our cache that it didn't know about, it
		// can send a deletion with an unknown state. Grab the service from our
		// cache for deleting.
		key, ok := delta.Object.(cache.DeletedFinalStateUnknown)
		if !ok {
			return fmt.Errorf("Delta contained object that wasn't a service or a deleted key: %+v", delta), notRetryable
		}
		cachedService, ok = s.cache.get(key.Key)
		if !ok {
			return fmt.Errorf("Service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), notRetryable
		}
		service = cachedService.lastState
		delta.Object = cachedService.lastState
		namespacedName = types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	} else {
		namespacedName.Namespace = service.Namespace
		namespacedName.Name = service.Name
		cachedService = s.cache.getOrCreate(namespacedName.String())
	}
	glog.V(2).Infof("Got new %s delta for service: %+v", delta.Type, service)

	// Ensure that no other goroutine will interfere with our processing of the
	// service.
	cachedService.mu.Lock()
	defer cachedService.mu.Unlock()

	// Update the cached service (used above for populating synthetic deletes)
	cachedService.lastState = service

	// TODO: Handle added, updated, and sync differently?
	switch delta.Type {
	case cache.Added:
		fallthrough
	case cache.Updated:
		fallthrough
	case cache.Sync:
		err, retry := s.createLoadBalancerIfNeeded(namespacedName, service, cachedService.appliedState)
		if err != nil {
			s.eventRecorder.Event(service, "creating loadbalancer failed", err.Error())
			return err, retry
		}
		// Always update the cache upon success.
		// NOTE: Since we update the cached service if and only if we successfully
		// processed it, a cached service being nil implies that it hasn't yet
		// been successfully processed.
		cachedService.appliedState = service
		s.cache.set(namespacedName.String(), cachedService)
	case cache.Deleted:
		err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region)
		if err != nil {
			s.eventRecorder.Event(service, "deleting loadbalancer failed", err.Error())
			return err, retryable
		}
		s.cache.delete(namespacedName.String())
	default:
		glog.Errorf("Unexpected delta type: %v", delta.Type)
	}
	return nil, notRetryable
}

// Returns whatever error occurred along with a boolean indicator of whether it
// should be retried.
func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.NamespacedName, service, cachedService *api.Service) (error, bool) {
	if cachedService != nil && !needsUpdate(cachedService, service) {
		glog.Infof("LB already exists and doesn't need update for service %s", namespacedName)
		return nil, notRetryable
	}
	if cachedService != nil {
		// If the service already exists but needs to be updated, delete it so that
		// we can recreate it cleanly.
		if wantsExternalLoadBalancer(cachedService) {
			glog.Infof("Deleting existing load balancer for service %s that needs an updated load balancer.", namespacedName)
			if err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(cachedService), s.zone.Region); err != nil {
				return err, retryable
			}
		}
	} else {
		// If we don't have any cached memory of the load balancer, we have to ask
		// the cloud provider for what it knows about it.
		status, exists, err := s.balancer.GetTCPLoadBalancer(s.loadBalancerName(service), s.zone.Region)
		if err != nil {
			return fmt.Errorf("Error getting LB for service %s: %v", namespacedName, err), retryable
		}
		if exists && api.LoadBalancerStatusEqual(status, &service.Status.LoadBalancer) {
			glog.Infof("LB already exists with status %s for previously uncached service %s", status, namespacedName)
			return nil, notRetryable
		} else if exists {
			glog.Infof("Deleting old LB for previously uncached service %s whose endpoint %s doesn't match the service's desired IPs %v",
				namespacedName, status, service.Spec.DeprecatedPublicIPs)
			if err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region); err != nil {
				return err, retryable
			}
		}
	}

	// Save the state so we can avoid a write if it doesn't change
	previousState := api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)

	if !wantsExternalLoadBalancer(service) {
		glog.Infof("Not creating LB for service %s that doesn't want one.", namespacedName)

		service.Status.LoadBalancer = api.LoadBalancerStatus{}
	} else {
		glog.V(2).Infof("Creating LB for service %s", namespacedName)

		// The load balancer doesn't exist yet, so create it.
		err := s.createExternalLoadBalancer(service)
		if err != nil {
			return fmt.Errorf("failed to create external load balancer for service %s: %v", namespacedName, err), retryable
		}
	}

	// Write the state if changed
	// TODO: Be careful here ... what if there were other changes to the service?
	if !api.LoadBalancerStatusEqual(previousState, &service.Status.LoadBalancer) {
		if err := s.persistUpdate(service); err != nil {
			return fmt.Errorf("Failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable
		}
	} else {
		glog.Infof("Not persisting unchanged LoadBalancerStatus to registry.")
	}

	return nil, notRetryable
}

func (s *ServiceController) persistUpdate(service *api.Service) error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = s.kubeClient.Services(service.Namespace).Update(service)
		if err == nil {
			return nil
		}
		// If the object no longer exists, we don't want to recreate it. Just bail
		// out so that we can process the delete, which we should soon be receiving
		// if we haven't already.
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service that no longer exists: %v", err)
			return nil
		}
		// TODO: Try to resolve the conflict if the change was unrelated to load
		// balancer status. For now, just rely on the fact that we'll
		// also process the update that caused the resource version to change.
		if errors.IsConflict(err) {
			glog.Infof("Not persisting update to service that has been changed since we received it: %v", err)
			return nil
		}
		glog.Warningf("Failed to persist updated LoadBalancerStatus to service %s after creating its external load balancer: %v",
			service.Name, err)
		time.Sleep(clientRetryInterval)
	}
	return err
}

func (s *ServiceController) createExternalLoadBalancer(service *api.Service) error {
	ports, err := getPortsForLB(service)
	if err != nil {
		return err
	}
	nodes, err := s.nodeLister.List()
	if err != nil {
		return err
	}
	name := s.loadBalancerName(service)
	if len(service.Spec.DeprecatedPublicIPs) > 0 {
		for _, publicIP := range service.Spec.DeprecatedPublicIPs {
			// TODO: Make this actually work for multiple IPs by using different
			// names for each. For now, we'll just create the first and break.
			status, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, net.ParseIP(publicIP),
				ports, hostsFromNodeList(&nodes), service.Spec.SessionAffinity)
			if err != nil {
				return err
			} else {
				service.Status.LoadBalancer = *status
			}
			break
		}
	} else {
		status, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, nil,
			ports, hostsFromNodeList(&nodes), service.Spec.SessionAffinity)
		if err != nil {
			return err
		} else {
			service.Status.LoadBalancer = *status
		}
	}
	return nil
}

// ListKeys implements the interface required by DeltaFIFO to list the keys we
// already know about.
func (s *serviceCache) ListKeys() []string {
	s.mu.Lock()
	defer s.mu.Unlock()
	keys := make([]string, 0, len(s.serviceMap))
	for k := range s.serviceMap {
		keys = append(keys, k)
	}
	return keys
}

// GetByKey returns the value stored in the serviceMap under the given key
func (s *serviceCache) GetByKey(key string) (interface{}, bool, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if v, ok := s.serviceMap[key]; ok {
		return v, true, nil
	}
	return nil, false, nil
}

// allServices returns a snapshot of all the services currently in the cache.
func (s *serviceCache) allServices() []*cachedService {
	s.mu.Lock()
	defer s.mu.Unlock()
	services := make([]*cachedService, 0, len(s.serviceMap))
	for _, v := range s.serviceMap {
		services = append(services, v)
	}
	return services
}

func (s *serviceCache) get(serviceName string) (*cachedService, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	service, ok := s.serviceMap[serviceName]
	return service, ok
}

func (s *serviceCache) getOrCreate(serviceName string) *cachedService {
	s.mu.Lock()
	defer s.mu.Unlock()
	service, ok := s.serviceMap[serviceName]
	if !ok {
		service = &cachedService{}
		s.serviceMap[serviceName] = service
	}
	return service
}

func (s *serviceCache) set(serviceName string, service *cachedService) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.serviceMap[serviceName] = service
}

func (s *serviceCache) delete(serviceName string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.serviceMap, serviceName)
}

func needsUpdate(oldService *api.Service, newService *api.Service) bool {
	if !wantsExternalLoadBalancer(oldService) && !wantsExternalLoadBalancer(newService) {
		return false
	}
	if wantsExternalLoadBalancer(oldService) != wantsExternalLoadBalancer(newService) {
		return true
	}
	if !portsEqualForLB(oldService, newService) || oldService.Spec.SessionAffinity != newService.Spec.SessionAffinity {
		return true
	}
	if len(oldService.Spec.DeprecatedPublicIPs) != len(newService.Spec.DeprecatedPublicIPs) {
		return true
	}
	for i := range oldService.Spec.DeprecatedPublicIPs {
		if oldService.Spec.DeprecatedPublicIPs[i] != newService.Spec.DeprecatedPublicIPs[i] {
			return true
		}
	}
	return false
}
|
||||
|
||||
func (s *ServiceController) loadBalancerName(service *api.Service) string {
|
||||
return cloudprovider.GetLoadBalancerName(service)
|
||||
}
|
||||
|
||||
func getPortsForLB(service *api.Service) ([]*api.ServicePort, error) {
|
||||
ports := []*api.ServicePort{}
|
||||
for i := range service.Spec.Ports {
|
||||
// TODO: Support UDP. Remove the check from the API validation package once
|
||||
// it's supported.
|
||||
sp := &service.Spec.Ports[i]
|
||||
if sp.Protocol != api.ProtocolTCP {
|
||||
			return nil, fmt.Errorf("external load balancers for non-TCP services are not currently supported")
		}
		ports = append(ports, sp)
	}
	return ports, nil
}

func portsEqualForLB(x, y *api.Service) bool {
	xPorts, err := getPortsForLB(x)
	if err != nil {
		return false
	}
	yPorts, err := getPortsForLB(y)
	if err != nil {
		return false
	}
	return portSlicesEqualForLB(xPorts, yPorts)
}

func portSlicesEqualForLB(x, y []*api.ServicePort) bool {
	if len(x) != len(y) {
		return false
	}

	for i := range x {
		if !portEqualForLB(x[i], y[i]) {
			return false
		}
	}
	return true
}

func portEqualForLB(x, y *api.ServicePort) bool {
	// TODO: Should we check name? (In theory, an LB could expose it)
	if x.Name != y.Name {
		return false
	}

	if x.Protocol != y.Protocol {
		return false
	}

	if x.Port != y.Port {
		return false
	}

	if x.NodePort != y.NodePort {
		return false
	}

	// We don't check TargetPort; that is not relevant for load balancing
	// TODO: Should we blank it out? Or just check it anyway?

	return true
}

func intSlicesEqual(x, y []int) bool {
	if len(x) != len(y) {
		return false
	}
	if !sort.IntsAreSorted(x) {
		sort.Ints(x)
	}
	if !sort.IntsAreSorted(y) {
		sort.Ints(y)
	}
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}

func stringSlicesEqual(x, y []string) bool {
	if len(x) != len(y) {
		return false
	}
	if !sort.StringsAreSorted(x) {
		sort.Strings(x)
	}
	if !sort.StringsAreSorted(y) {
		sort.Strings(y)
	}
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}

func hostsFromNodeList(list *api.NodeList) []string {
	result := make([]string, len(list.Items))
	for ix := range list.Items {
		result[ix] = list.Items[ix].Name
	}
	return result
}

// nodeSyncLoop handles updating the hosts pointed to by all external load
// balancers whenever the set of nodes in the cluster changes.
func (s *ServiceController) nodeSyncLoop(period time.Duration) {
	var prevHosts []string
	var servicesToUpdate []*cachedService
	// TODO: Eliminate the unneeded now variable once we stop compiling in go1.3.
	// It's needed at the moment because go1.3 requires ranges to be assigned to
	// something to compile, and gofmt1.4 complains about using `_ = range`.
	for now := range time.Tick(period) {
		_ = now
		nodes, err := s.nodeLister.List()
		if err != nil {
			glog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
			continue
		}
		newHosts := hostsFromNodeList(&nodes)
		if stringSlicesEqual(newHosts, prevHosts) {
			// The set of nodes in the cluster hasn't changed, but we can retry
			// updating any services that we failed to update last time around.
			servicesToUpdate = s.updateLoadBalancerHosts(servicesToUpdate, newHosts)
			continue
		}
		glog.Infof("Detected change in list of current cluster nodes. New node set: %v", newHosts)

		// Try updating all services, and save the ones that fail to try again next
		// round.
		servicesToUpdate = s.cache.allServices()
		numServices := len(servicesToUpdate)
		servicesToUpdate = s.updateLoadBalancerHosts(servicesToUpdate, newHosts)
		glog.Infof("Successfully updated %d out of %d external load balancers to direct traffic to the updated set of nodes",
			numServices-len(servicesToUpdate), numServices)

		prevHosts = newHosts
	}
}

// updateLoadBalancerHosts updates all existing external load balancers so that
// they will match the list of hosts provided.
// Returns the list of services that couldn't be updated.
func (s *ServiceController) updateLoadBalancerHosts(services []*cachedService, hosts []string) (servicesToRetry []*cachedService) {
	for _, service := range services {
		func() {
			service.mu.Lock()
			defer service.mu.Unlock()
			// If the service is nil, that means it hasn't yet been successfully dealt
			// with by the load balancer reconciler. We can trust the load balancer
			// reconciler to ensure the service's load balancer is created to target
			// the correct nodes.
			if service.appliedState == nil {
				return
			}
			if err := s.lockedUpdateLoadBalancerHosts(service.appliedState, hosts); err != nil {
				glog.Errorf("External error while updating TCP load balancer: %v.", err)
				servicesToRetry = append(servicesToRetry, service)
			}
		}()
	}
	return servicesToRetry
}

// Updates the external load balancer of a service, assuming we hold the mutex
// associated with the service.
func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service, hosts []string) error {
	if !wantsExternalLoadBalancer(service) {
		return nil
	}

	name := cloudprovider.GetLoadBalancerName(service)
	err := s.balancer.UpdateTCPLoadBalancer(name, s.zone.Region, hosts)
	if err == nil {
		return nil
	}

	// It's only an actual error if the load balancer still exists.
	if _, exists, err := s.balancer.GetTCPLoadBalancer(name, s.zone.Region); err != nil {
		glog.Errorf("External error while checking if TCP load balancer %q exists: %v", name, err)
	} else if !exists {
		return nil
	}
	return err
}

func wantsExternalLoadBalancer(service *api.Service) bool {
	return service.Spec.Type == api.ServiceTypeLoadBalancer
}
221
pkg/controller/service/servicecontroller_test.go
Normal file
@@ -0,0 +1,221 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package servicecontroller

import (
	"reflect"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
	fake_cloud "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
)

const region = "us-central"

func newService(name string, uid types.UID, serviceType api.ServiceType) *api.Service {
	return &api.Service{ObjectMeta: api.ObjectMeta{Name: name, Namespace: "namespace", UID: uid}, Spec: api.ServiceSpec{Type: serviceType}}
}

func TestCreateExternalLoadBalancer(t *testing.T) {
	table := []struct {
		service             *api.Service
		expectErr           bool
		expectCreateAttempt bool
	}{
		{
			service: &api.Service{
				ObjectMeta: api.ObjectMeta{
					Name:      "no-external-balancer",
					Namespace: "default",
				},
				Spec: api.ServiceSpec{
					Type: api.ServiceTypeClusterIP,
				},
			},
			expectErr:           false,
			expectCreateAttempt: false,
		},
		{
			service: &api.Service{
				ObjectMeta: api.ObjectMeta{
					Name:      "udp-service",
					Namespace: "default",
				},
				Spec: api.ServiceSpec{
					Ports: []api.ServicePort{{
						Port:     80,
						Protocol: api.ProtocolUDP,
					}},
					Type: api.ServiceTypeLoadBalancer,
				},
			},
			expectErr:           true,
			expectCreateAttempt: false,
		},
		{
			service: &api.Service{
				ObjectMeta: api.ObjectMeta{
					Name:      "basic-service1",
					Namespace: "default",
				},
				Spec: api.ServiceSpec{
					Ports: []api.ServicePort{{
						Port:     80,
						Protocol: api.ProtocolTCP,
					}},
					Type: api.ServiceTypeLoadBalancer,
				},
			},
			expectErr:           false,
			expectCreateAttempt: true,
		},
	}

	for _, item := range table {
		cloud := &fake_cloud.FakeCloud{}
		cloud.Region = region
		client := &testclient.Fake{}
		controller := New(cloud, client, "test-cluster")
		controller.init()
		cloud.Calls = nil     // ignore any cloud calls made in init()
		client.ClearActions() // ignore any client calls made in init()
		err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{"foo", "bar"}, item.service, nil)
		if !item.expectErr && err != nil {
			t.Errorf("unexpected error: %v", err)
		} else if item.expectErr && err == nil {
			t.Errorf("expected error creating %v, got nil", item.service)
		}
		actions := client.Actions()
		if !item.expectCreateAttempt {
			if len(cloud.Calls) > 0 {
				t.Errorf("unexpected cloud provider calls: %v", cloud.Calls)
			}
			if len(actions) > 0 {
				t.Errorf("unexpected client actions: %v", actions)
			}
		} else {
			if len(cloud.Balancers) != 1 {
				t.Errorf("expected one load balancer to be created, got %v", cloud.Balancers)
			} else if cloud.Balancers[0].Name != controller.loadBalancerName(item.service) ||
				cloud.Balancers[0].Region != region ||
				cloud.Balancers[0].Ports[0].Port != item.service.Spec.Ports[0].Port {
				t.Errorf("created load balancer has incorrect parameters: %v", cloud.Balancers[0])
			}
			actionFound := false
			for _, action := range actions {
				if action.Action == "update-service" {
					actionFound = true
				}
			}
			if !actionFound {
				t.Errorf("expected updated service to be sent to client, got these actions instead: %v", actions)
			}
		}
	}
}

// TODO: Finish converting and update comments
func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
	hosts := []string{"node0", "node1", "node73"}
	table := []struct {
		services            []*api.Service
		expectedUpdateCalls []fake_cloud.FakeUpdateBalancerCall
	}{
		{
			// No services present: no calls should be made.
			services:            []*api.Service{},
			expectedUpdateCalls: nil,
		},
		{
			// Services do not have external load balancers: no calls should be made.
			services: []*api.Service{
				newService("s0", "111", api.ServiceTypeClusterIP),
				newService("s1", "222", api.ServiceTypeNodePort),
			},
			expectedUpdateCalls: nil,
		},
		{
			// One service has an external load balancer: one call should be made.
			services: []*api.Service{
				newService("s0", "333", api.ServiceTypeLoadBalancer),
			},
			expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
				{Name: "a333", Region: region, Hosts: []string{"node0", "node1", "node73"}},
			},
		},
		{
			// Three services have an external load balancer: three calls.
			services: []*api.Service{
				newService("s0", "444", api.ServiceTypeLoadBalancer),
				newService("s1", "555", api.ServiceTypeLoadBalancer),
				newService("s2", "666", api.ServiceTypeLoadBalancer),
			},
			expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
				{Name: "a444", Region: region, Hosts: []string{"node0", "node1", "node73"}},
				{Name: "a555", Region: region, Hosts: []string{"node0", "node1", "node73"}},
				{Name: "a666", Region: region, Hosts: []string{"node0", "node1", "node73"}},
			},
		},
		{
			// Two services have an external load balancer and two don't: two calls.
			services: []*api.Service{
				newService("s0", "777", api.ServiceTypeNodePort),
				newService("s1", "888", api.ServiceTypeLoadBalancer),
				newService("s3", "999", api.ServiceTypeLoadBalancer),
				newService("s4", "123", api.ServiceTypeClusterIP),
			},
			expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
				{Name: "a888", Region: region, Hosts: []string{"node0", "node1", "node73"}},
				{Name: "a999", Region: region, Hosts: []string{"node0", "node1", "node73"}},
			},
		},
		{
			// One service has an external load balancer and one is nil: one call.
			services: []*api.Service{
				newService("s0", "234", api.ServiceTypeLoadBalancer),
				nil,
			},
			expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
				{Name: "a234", Region: region, Hosts: []string{"node0", "node1", "node73"}},
			},
		},
	}
	for _, item := range table {
		cloud := &fake_cloud.FakeCloud{}

		cloud.Region = region
		client := &testclient.Fake{}
		controller := New(cloud, client, "test-cluster2")
		controller.init()
		cloud.Calls = nil // ignore any cloud calls made in init()

		var services []*cachedService
		for _, service := range item.services {
			services = append(services, &cachedService{lastState: service, appliedState: service})
		}
		if servicesToRetry := controller.updateLoadBalancerHosts(services, hosts); len(servicesToRetry) != 0 {
			t.Errorf("unexpected services to retry: %v", servicesToRetry)
		}
		if !reflect.DeepEqual(item.expectedUpdateCalls, cloud.UpdateCalls) {
			t.Errorf("expected update calls mismatch, expected %+v, got %+v", item.expectedUpdateCalls, cloud.UpdateCalls)
		}
	}
}

// TODO(a-robinson): Add tests for update/sync/delete.
19
pkg/controller/serviceaccount/doc.go
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package serviceaccount provides implementations
// to manage service accounts and service account tokens
package serviceaccount
237
pkg/controller/serviceaccount/jwt.go
Normal file
@@ -0,0 +1,237 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serviceaccount

import (
	"bytes"
	"crypto/rsa"
	"errors"
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/auth/authenticator"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/auth/user"

	jwt "github.com/dgrijalva/jwt-go"
	"github.com/golang/glog"
)

const (
	ServiceAccountUsernamePrefix    = "system:serviceaccount"
	ServiceAccountUsernameSeparator = ":"

	Issuer = "kubernetes/serviceaccount"

	SubjectClaim            = "sub"
	IssuerClaim             = "iss"
	ServiceAccountNameClaim = "kubernetes.io/serviceaccount/service-account.name"
	ServiceAccountUIDClaim  = "kubernetes.io/serviceaccount/service-account.uid"
	SecretNameClaim         = "kubernetes.io/serviceaccount/secret.name"
	NamespaceClaim          = "kubernetes.io/serviceaccount/namespace"
)

type TokenGenerator interface {
	// GenerateToken generates a token which will identify the given ServiceAccount.
	// The returned token will be stored in the given (and yet-unpersisted) Secret.
	GenerateToken(serviceAccount api.ServiceAccount, secret api.Secret) (string, error)
}

// ReadPrivateKey is a helper function for reading an rsa.PrivateKey from a PEM-encoded file
func ReadPrivateKey(file string) (*rsa.PrivateKey, error) {
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}
	return jwt.ParseRSAPrivateKeyFromPEM(data)
}

// ReadPublicKey is a helper function for reading an rsa.PublicKey from a PEM-encoded file
// Reads public keys from both public and private key files
func ReadPublicKey(file string) (*rsa.PublicKey, error) {
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}

	if privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(data); err == nil {
		return &privateKey.PublicKey, nil
	}

	return jwt.ParseRSAPublicKeyFromPEM(data)
}

// MakeUsername generates a username from the given namespace and ServiceAccount name.
// The resulting username can be passed to SplitUsername to extract the original namespace and ServiceAccount name.
func MakeUsername(namespace, name string) string {
	return strings.Join([]string{ServiceAccountUsernamePrefix, namespace, name}, ServiceAccountUsernameSeparator)
}

// SplitUsername returns the namespace and ServiceAccount name embedded in the given username,
// or an error if the username is not a valid name produced by MakeUsername
func SplitUsername(username string) (string, string, error) {
	if !strings.HasPrefix(username, ServiceAccountUsernamePrefix+ServiceAccountUsernameSeparator) {
		return "", "", fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name"))
	}
	username = strings.TrimPrefix(username, ServiceAccountUsernamePrefix+ServiceAccountUsernameSeparator)
	parts := strings.Split(username, ServiceAccountUsernameSeparator)
	if len(parts) != 2 || len(parts[0]) == 0 || len(parts[1]) == 0 {
		return "", "", fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name"))
	}
	return parts[0], parts[1], nil
}

// JWTTokenGenerator returns a TokenGenerator that generates signed JWT tokens, using the given privateKey.
// privateKey is a PEM-encoded byte array of a private RSA key.
// Tokens it produces can be verified by JWTTokenAuthenticator().
func JWTTokenGenerator(key *rsa.PrivateKey) TokenGenerator {
	return &jwtTokenGenerator{key}
}

type jwtTokenGenerator struct {
	key *rsa.PrivateKey
}

func (j *jwtTokenGenerator) GenerateToken(serviceAccount api.ServiceAccount, secret api.Secret) (string, error) {
	token := jwt.New(jwt.SigningMethodRS256)

	// Identify the issuer
	token.Claims[IssuerClaim] = Issuer

	// Username: `serviceaccount:<namespace>:<serviceaccount>`
	token.Claims[SubjectClaim] = MakeUsername(serviceAccount.Namespace, serviceAccount.Name)

	// Persist enough structured info for the authenticator to be able to look up the service account and secret
	token.Claims[NamespaceClaim] = serviceAccount.Namespace
	token.Claims[ServiceAccountNameClaim] = serviceAccount.Name
	token.Claims[ServiceAccountUIDClaim] = serviceAccount.UID
	token.Claims[SecretNameClaim] = secret.Name

	// Sign and get the complete encoded token as a string
	return token.SignedString(j.key)
}

// JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator
// Token signatures are verified using each of the given public keys until one works (allowing key rotation)
// If lookup is true, the service account and secret referenced as claims inside the token are retrieved and verified with the provided ServiceAccountTokenGetter
func JWTTokenAuthenticator(keys []*rsa.PublicKey, lookup bool, getter ServiceAccountTokenGetter) authenticator.Token {
	return &jwtTokenAuthenticator{keys, lookup, getter}
}

type jwtTokenAuthenticator struct {
	keys   []*rsa.PublicKey
	lookup bool
	getter ServiceAccountTokenGetter
}

func (j *jwtTokenAuthenticator) AuthenticateToken(token string) (user.Info, bool, error) {
	var validationError error

	for i, key := range j.keys {
		// Attempt to verify with each key until we find one that works
		parsedToken, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
			if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
				return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
			}
			return key, nil
		})

		if err != nil {
			switch err := err.(type) {
			case *jwt.ValidationError:
				if (err.Errors & jwt.ValidationErrorMalformed) != 0 {
					// Not a JWT, no point in continuing
					return nil, false, nil
				}

				if (err.Errors & jwt.ValidationErrorSignatureInvalid) != 0 {
					// Signature error, perhaps one of the other keys will verify the signature
					// If not, we want to return this error
					glog.V(4).Infof("Signature error (key %d): %v", i, err)
					validationError = err
					continue
				}
			}

			// Other errors should just return as errors
			return nil, false, err
		}

		// If we get here, we have a token with a recognized signature

		// Make sure we issued the token
		iss, _ := parsedToken.Claims[IssuerClaim].(string)
		if iss != Issuer {
			return nil, false, nil
		}

		// Make sure the claims we need exist
		sub, _ := parsedToken.Claims[SubjectClaim].(string)
		if len(sub) == 0 {
			return nil, false, errors.New("sub claim is missing")
		}
		namespace, _ := parsedToken.Claims[NamespaceClaim].(string)
		if len(namespace) == 0 {
			return nil, false, errors.New("namespace claim is missing")
		}
		secretName, _ := parsedToken.Claims[SecretNameClaim].(string)
		if len(secretName) == 0 {
return nil, false, errors.New("secretName claim is missing")
|
||||
}
|
||||
serviceAccountName, _ := parsedToken.Claims[ServiceAccountNameClaim].(string)
|
||||
if len(serviceAccountName) == 0 {
|
||||
return nil, false, errors.New("serviceAccountName claim is missing")
|
||||
}
|
||||
serviceAccountUID, _ := parsedToken.Claims[ServiceAccountUIDClaim].(string)
|
||||
if len(serviceAccountUID) == 0 {
|
||||
return nil, false, errors.New("serviceAccountUID claim is missing")
|
||||
}
|
||||
|
||||
if j.lookup {
|
||||
// Make sure token hasn't been invalidated by deletion of the secret
|
||||
secret, err := j.getter.GetSecret(namespace, secretName)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err)
|
||||
return nil, false, errors.New("Token has been invalidated")
|
||||
}
|
||||
if bytes.Compare(secret.Data[api.ServiceAccountTokenKey], []byte(token)) != 0 {
|
||||
glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
|
||||
return nil, false, errors.New("Token does not match server's copy")
|
||||
}
|
||||
|
||||
// Make sure service account still exists (name and UID)
|
||||
serviceAccount, err := j.getter.GetServiceAccount(namespace, serviceAccountName)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err)
|
||||
return nil, false, err
|
||||
}
|
||||
if string(serviceAccount.UID) != serviceAccountUID {
|
||||
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID)
|
||||
return nil, false, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
|
||||
}
|
||||
}
|
||||
|
||||
return &user.DefaultInfo{
|
||||
Name: sub,
|
||||
UID: serviceAccountUID,
|
||||
Groups: []string{},
|
||||
}, true, nil
|
||||
}
|
||||
|
||||
return nil, false, validationError
|
||||
}
|
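
To see how the pieces above fit together, here is a minimal sketch (hypothetical, not part of this commit; the key file paths are placeholders) of generating a token and verifying it against a rotated key set, without secret lookup:

// Hypothetical illustration only: token round trip with key rotation.
func exampleTokenRoundTrip(sa api.ServiceAccount, secret api.Secret) error {
	signingKey, err := ReadPrivateKey("/path/to/current.key") // placeholder path
	if err != nil {
		return err
	}
	previousKey, err := ReadPublicKey("/path/to/previous.pub") // placeholder path
	if err != nil {
		return err
	}
	token, err := JWTTokenGenerator(signingKey).GenerateToken(sa, secret)
	if err != nil {
		return err
	}
	// The authenticator tries each public key in turn, so tokens signed by a
	// retired key keep verifying as long as that key stays in the list.
	auth := JWTTokenAuthenticator([]*rsa.PublicKey{previousKey, &signingKey.PublicKey}, false, nil)
	if _, ok, err := auth.AuthenticateToken(token); err != nil || !ok {
		return fmt.Errorf("token did not verify: ok=%v err=%v", ok, err)
	}
	return nil
}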
263
pkg/controller/serviceaccount/jwt_test.go
Normal file
@@ -0,0 +1,263 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serviceaccount

import (
	"crypto/rsa"
	"io/ioutil"
	"os"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
	"github.com/dgrijalva/jwt-go"
)

const otherPublicKey = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArXz0QkIG1B5Bj2/W69GH
rsm5e+RC3kE+VTgocge0atqlLBek35tRqLgUi3AcIrBZ/0YctMSWDVcRt5fkhWwe
Lqjj6qvAyNyOkrkBi1NFDpJBjYJtuKHgRhNxXbOzTSNpdSKXTfOkzqv56MwHOP25
yP/NNAODUtr92D5ySI5QX8RbXW+uDn+ixul286PBW/BCrE4tuS88dA0tYJPf8LCu
sqQOwlXYH/rNUg4Pyl9xxhR5DIJR0OzNNfChjw60zieRIt2LfM83fXhwk8IxRGkc
gPZm7ZsipmfbZK2Tkhnpsa4QxDg7zHJPMsB5kxRXW0cQipXcC3baDyN9KBApNXa0
PwIDAQAB
-----END PUBLIC KEY-----`

const publicKey = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA249XwEo9k4tM8fMxV7zx
OhcrP+WvXn917koM5Qr2ZXs4vo26e4ytdlrV0bQ9SlcLpQVSYjIxNfhTZdDt+ecI
zshKuv1gKIxbbLQMOuK1eA/4HALyEkFgmS/tleLJrhc65tKPMGD+pKQ/xhmzRuCG
51RoiMgbQxaCyYxGfNLpLAZK9L0Tctv9a0mJmGIYnIOQM4kC1A1I1n3EsXMWmeJU
j7OTh/AjjCnMnkgvKT2tpKxYQ59PgDgU8Ssc7RDSmSkLxnrv+OrN80j6xrw0OjEi
B4Ycr0PqfzZcvy8efTtFQ/Jnc4Bp1zUtFXt7+QeevePtQ2EcyELXE0i63T1CujRM
WwIDAQAB
-----END PUBLIC KEY-----
`

const privateKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA249XwEo9k4tM8fMxV7zxOhcrP+WvXn917koM5Qr2ZXs4vo26
e4ytdlrV0bQ9SlcLpQVSYjIxNfhTZdDt+ecIzshKuv1gKIxbbLQMOuK1eA/4HALy
EkFgmS/tleLJrhc65tKPMGD+pKQ/xhmzRuCG51RoiMgbQxaCyYxGfNLpLAZK9L0T
ctv9a0mJmGIYnIOQM4kC1A1I1n3EsXMWmeJUj7OTh/AjjCnMnkgvKT2tpKxYQ59P
gDgU8Ssc7RDSmSkLxnrv+OrN80j6xrw0OjEiB4Ycr0PqfzZcvy8efTtFQ/Jnc4Bp
1zUtFXt7+QeevePtQ2EcyELXE0i63T1CujRMWwIDAQABAoIBAHJx8GqyCBDNbqk7
e7/hI9iE1S10Wwol5GH2RWxqX28cYMKq+8aE2LI1vPiXO89xOgelk4DN6urX6xjK
ZBF8RRIMQy/e/O2F4+3wl+Nl4vOXV1u6iVXMsD6JRg137mqJf1Fr9elg1bsaRofL
Q7CxPoB8dhS+Qb+hj0DhlqhgA9zG345CQCAds0ZYAZe8fP7bkwrLqZpMn7Dz9WVm
++YgYYKjuE95kPuup/LtWfA9rJyE/Fws8/jGvRSpVn1XglMLSMKhLd27sE8ZUSV0
2KUzbfRGE0+AnRULRrjpYaPu0XQ2JjdNvtkjBnv27RB89W9Gklxq821eH1Y8got8
FZodjxECgYEA93pz7AQZ2xDs67d1XLCzpX84GxKzttirmyj3OIlxgzVHjEMsvw8v
sjFiBU5xEEQDosrBdSknnlJqyiq1YwWG/WDckr13d8G2RQWoySN7JVmTQfXcLoTu
YGRiiTuoEi3ab3ZqrgGrFgX7T/cHuasbYvzCvhM2b4VIR3aSxU2DTUMCgYEA4x7J
T/ErP6GkU5nKstu/mIXwNzayEO1BJvPYsy7i7EsxTm3xe/b8/6cYOz5fvJLGH5mT
Q8YvuLqBcMwZardrYcwokD55UvNLOyfADDFZ6l3WntIqbA640Ok2g1X4U8J09xIq
ZLIWK1yWbbvi4QCeN5hvWq47e8sIj5QHjIIjRwkCgYEAyNqjltxFN9zmzPDa2d24
EAvOt3pYTYBQ1t9KtqImdL0bUqV6fZ6PsWoPCgt+DBuHb+prVPGP7Bkr/uTmznU/
+AlTO+12NsYLbr2HHagkXE31DEXE7CSLa8RNjN/UKtz4Ohq7vnowJvG35FCz/mb3
FUHbtHTXa2+bGBUOTf/5Hw0CgYBxw0r9EwUhw1qnUYJ5op7OzFAtp+T7m4ul8kCa
SCL8TxGsgl+SQ34opE775dtYfoBk9a0RJqVit3D8yg71KFjOTNAIqHJm/Vyyjc+h
i9rJDSXiuczsAVfLtPVMRfS0J9QkqeG4PIfkQmVLI/CZ2ZBmsqEcX+eFs4ZfPLun
Qsxe2QKBgGuPilIbLeIBDIaPiUI0FwU8v2j8CEQBYvoQn34c95hVQsig/o5z7zlo
UsO0wlTngXKlWdOcCs1kqEhTLrstf48djDxAYAxkw40nzeJOt7q52ib/fvf4/UBy
X024wzbiw1q07jFCyfQmODzURAx1VNT7QVUMdz/N8vy47/H40AZJ
-----END RSA PRIVATE KEY-----
`

func getPrivateKey(data string) *rsa.PrivateKey {
	key, _ := jwt.ParseRSAPrivateKeyFromPEM([]byte(data))
	return key
}

func getPublicKey(data string) *rsa.PublicKey {
	key, _ := jwt.ParseRSAPublicKeyFromPEM([]byte(data))
	return key
}

func TestReadPrivateKey(t *testing.T) {
	f, err := ioutil.TempFile("", "")
	if err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}
	defer os.Remove(f.Name())

	if err := ioutil.WriteFile(f.Name(), []byte(privateKey), os.FileMode(0600)); err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}

	if _, err := ReadPrivateKey(f.Name()); err != nil {
		t.Fatalf("error reading key: %v", err)
	}
}

func TestReadPublicKey(t *testing.T) {
	f, err := ioutil.TempFile("", "")
	if err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}
	defer os.Remove(f.Name())

	if err := ioutil.WriteFile(f.Name(), []byte(publicKey), os.FileMode(0600)); err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}

	if _, err := ReadPublicKey(f.Name()); err != nil {
		t.Fatalf("error reading key: %v", err)
	}
}

func TestTokenGenerateAndValidate(t *testing.T) {
	expectedUserName := "system:serviceaccount:test:my-service-account"
	expectedUserUID := "12345"

	// Related API objects
	serviceAccount := &api.ServiceAccount{
		ObjectMeta: api.ObjectMeta{
			Name:      "my-service-account",
			UID:       "12345",
			Namespace: "test",
		},
	}
	secret := &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      "my-secret",
			Namespace: "test",
		},
	}

	// Generate the token
	generator := JWTTokenGenerator(getPrivateKey(privateKey))
	token, err := generator.GenerateToken(*serviceAccount, *secret)
	if err != nil {
		t.Fatalf("error generating token: %v", err)
	}
	if len(token) == 0 {
		t.Fatalf("no token generated")
	}

	// "Save" the token
	secret.Data = map[string][]byte{
		"token": []byte(token),
	}

	testCases := map[string]struct {
		Client client.Interface
		Keys   []*rsa.PublicKey

		ExpectedErr      bool
		ExpectedOK       bool
		ExpectedUserName string
		ExpectedUserUID  string
	}{
		"no keys": {
			Client:      nil,
			Keys:        []*rsa.PublicKey{},
			ExpectedErr: false,
			ExpectedOK:  false,
		},
		"invalid keys": {
			Client:      nil,
			Keys:        []*rsa.PublicKey{getPublicKey(otherPublicKey)},
			ExpectedErr: true,
			ExpectedOK:  false,
		},
		"valid key": {
			Client:           nil,
			Keys:             []*rsa.PublicKey{getPublicKey(publicKey)},
			ExpectedErr:      false,
			ExpectedOK:       true,
			ExpectedUserName: expectedUserName,
			ExpectedUserUID:  expectedUserUID,
		},
		"rotated keys": {
			Client:           nil,
			Keys:             []*rsa.PublicKey{getPublicKey(otherPublicKey), getPublicKey(publicKey)},
			ExpectedErr:      false,
			ExpectedOK:       true,
			ExpectedUserName: expectedUserName,
			ExpectedUserUID:  expectedUserUID,
		},
		"valid lookup": {
			Client:           testclient.NewSimpleFake(serviceAccount, secret),
			Keys:             []*rsa.PublicKey{getPublicKey(publicKey)},
			ExpectedErr:      false,
			ExpectedOK:       true,
			ExpectedUserName: expectedUserName,
			ExpectedUserUID:  expectedUserUID,
		},
		"invalid secret lookup": {
			Client:      testclient.NewSimpleFake(serviceAccount),
			Keys:        []*rsa.PublicKey{getPublicKey(publicKey)},
			ExpectedErr: true,
			ExpectedOK:  false,
		},
		"invalid serviceaccount lookup": {
			Client:      testclient.NewSimpleFake(secret),
			Keys:        []*rsa.PublicKey{getPublicKey(publicKey)},
			ExpectedErr: true,
			ExpectedOK:  false,
		},
	}

	for k, tc := range testCases {
		getter := NewGetterFromClient(tc.Client)
		authenticator := JWTTokenAuthenticator(tc.Keys, tc.Client != nil, getter)

		user, ok, err := authenticator.AuthenticateToken(token)
		if (err != nil) != tc.ExpectedErr {
			t.Errorf("%s: Expected error=%v, got %v", k, tc.ExpectedErr, err)
			continue
		}

		if ok != tc.ExpectedOK {
			t.Errorf("%s: Expected ok=%v, got %v", k, tc.ExpectedOK, ok)
			continue
		}

		if err != nil || !ok {
			continue
		}

		if user.GetName() != tc.ExpectedUserName {
			t.Errorf("%s: Expected username=%v, got %v", k, tc.ExpectedUserName, user.GetName())
			continue
		}
		if user.GetUID() != tc.ExpectedUserUID {
			t.Errorf("%s: Expected userUID=%v, got %v", k, tc.ExpectedUserUID, user.GetUID())
			continue
		}
	}
}

func TestMakeSplitUsername(t *testing.T) {
	username := MakeUsername("ns", "name")
	ns, name, err := SplitUsername(username)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	if ns != "ns" || name != "name" {
		t.Errorf("Expected ns/name, got %s/%s", ns, name)
	}

	invalid := []string{"test", "system:serviceaccount", "system:serviceaccount:", "system:serviceaccount:ns", "system:serviceaccount:ns:name:extra"}
	for _, n := range invalid {
		_, _, err := SplitUsername(n)
		if err == nil {
			t.Errorf("Expected error for %s", n)
		}
	}
}
251
pkg/controller/serviceaccount/serviceaccounts_controller.go
Normal file
@@ -0,0 +1,251 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serviceaccount

import (
	"fmt"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
	"github.com/golang/glog"
)

// nameIndexFunc is an index function that indexes based on an object's name
func nameIndexFunc(obj interface{}) ([]string, error) {
	meta, err := meta.Accessor(obj)
	if err != nil {
		return []string{""}, fmt.Errorf("object has no meta: %v", err)
	}
	return []string{meta.Name()}, nil
}

// ServiceAccountsControllerOptions contains options for running a ServiceAccountsController
type ServiceAccountsControllerOptions struct {
	// Names is the set of service account names to ensure exist in every namespace
	Names util.StringSet

	// ServiceAccountResync is the interval between full resyncs of ServiceAccounts.
	// If non-zero, all service accounts will be re-listed this often.
	// Otherwise, re-list will be delayed as long as possible (until the watch is closed or times out).
	ServiceAccountResync time.Duration

	// NamespaceResync is the interval between full resyncs of Namespaces.
	// If non-zero, all namespaces will be re-listed this often.
	// Otherwise, re-list will be delayed as long as possible (until the watch is closed or times out).
	NamespaceResync time.Duration
}

func DefaultServiceAccountsControllerOptions() ServiceAccountsControllerOptions {
	return ServiceAccountsControllerOptions{Names: util.NewStringSet("default")}
}

// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(cl client.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client: cl,
		names:  options.Names,
	}

	accountSelector := fields.Everything()
	if len(options.Names) == 1 {
		// If we're maintaining a single account, we can scope the accounts we watch to just that name
		accountSelector = fields.SelectorFromSet(map[string]string{client.ObjectNameField: options.Names.List()[0]})
	}
	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), accountSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), accountSelector, rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.namespaces, e.namespaceController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Namespaces().Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Namespace{},
		options.NamespaceResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.namespaceAdded,
			UpdateFunc: e.namespaceUpdated,
		},
		cache.Indexers{"name": nameIndexFunc},
	)

	return e
}

// ServiceAccountsController manages ServiceAccount objects inside Namespaces
type ServiceAccountsController struct {
	stopChan chan struct{}

	client client.Interface
	names  util.StringSet

	serviceAccounts cache.Indexer
	namespaces      cache.Indexer

	// Since we join two objects, we'll watch both of them with controllers.
	serviceAccountController *framework.Controller
	namespaceController      *framework.Controller
}

// Runs controller loops and returns immediately
func (e *ServiceAccountsController) Run() {
	if e.stopChan == nil {
		e.stopChan = make(chan struct{})
		go e.serviceAccountController.Run(e.stopChan)
		go e.namespaceController.Run(e.stopChan)
	}
}

// Stop gracefully shuts down this controller
func (e *ServiceAccountsController) Stop() {
	if e.stopChan != nil {
		close(e.stopChan)
		e.stopChan = nil
	}
}

// serviceAccountDeleted reacts to a ServiceAccount deletion by recreating a default ServiceAccount in the namespace if needed
func (e *ServiceAccountsController) serviceAccountDeleted(obj interface{}) {
	serviceAccount, ok := obj.(*api.ServiceAccount)
	if !ok {
		// Unknown type. If we missed a ServiceAccount deletion, the
		// corresponding secrets will be cleaned up during the Secret re-list
		return
	}
	// If the deleted service account is one we're maintaining, recreate it
	if e.names.Has(serviceAccount.Name) {
		e.createServiceAccountIfNeeded(serviceAccount.Name, serviceAccount.Namespace)
	}
}

// namespaceAdded reacts to a Namespace creation by creating a default ServiceAccount object
func (e *ServiceAccountsController) namespaceAdded(obj interface{}) {
	namespace := obj.(*api.Namespace)
	for _, name := range e.names.List() {
		e.createServiceAccountIfNeeded(name, namespace.Name)
	}
}

// namespaceUpdated reacts to a Namespace update (or re-list) by creating a default ServiceAccount in the namespace if needed
func (e *ServiceAccountsController) namespaceUpdated(oldObj interface{}, newObj interface{}) {
	newNamespace := newObj.(*api.Namespace)
	for _, name := range e.names.List() {
		e.createServiceAccountIfNeeded(name, newNamespace.Name)
	}
}

// createServiceAccountIfNeeded creates a ServiceAccount with the given name in the given namespace if:
// * the named ServiceAccount does not already exist
// * the specified namespace exists
// * the specified namespace is in the ACTIVE phase
func (e *ServiceAccountsController) createServiceAccountIfNeeded(name, namespace string) {
	serviceAccount, err := e.getServiceAccount(name, namespace)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount != nil {
		// If service account already exists, it doesn't need to be created
		return
	}

	namespaceObj, err := e.getNamespace(namespace)
	if err != nil {
		glog.Error(err)
		return
	}
	if namespaceObj == nil {
		// If namespace does not exist, no service account is needed
		return
	}
	if namespaceObj.Status.Phase != api.NamespaceActive {
		// If namespace is not active, we shouldn't try to create anything
		return
	}

	e.createServiceAccount(name, namespace)
}

// createServiceAccount creates a ServiceAccount with the given name in the specified namespace
func (e *ServiceAccountsController) createServiceAccount(name, namespace string) {
	serviceAccount := &api.ServiceAccount{}
	serviceAccount.Name = name
	serviceAccount.Namespace = namespace
	if _, err := e.client.ServiceAccounts(namespace).Create(serviceAccount); err != nil {
		glog.Error(err)
	}
}

// getServiceAccount returns the ServiceAccount with the given name in the given namespace, or nil if none exists
func (e *ServiceAccountsController) getServiceAccount(name, namespace string) (*api.ServiceAccount, error) {
|
||||
key := &api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: namespace}}
|
||||
accounts, err := e.serviceAccounts.Index("namespace", key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, obj := range accounts {
|
||||
serviceAccount := obj.(*api.ServiceAccount)
|
||||
if name == serviceAccount.Name {
|
||||
return serviceAccount, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// getNamespace returns the Namespace with the given name
|
||||
func (e *ServiceAccountsController) getNamespace(name string) (*api.Namespace, error) {
|
||||
key := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: name}}
|
||||
namespaces, err := e.namespaces.Index("name", key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(namespaces) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(namespaces) == 1 {
|
||||
return namespaces[0].(*api.Namespace), nil
|
||||
}
|
||||
return nil, fmt.Errorf("%d namespaces with the name %s indexed", len(namespaces), name)
|
||||
}
|
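
A minimal sketch of wiring the controller up (hypothetical, not part of this commit; cl stands in for any configured client.Interface):

// Hypothetical illustration only: running the service accounts controller.
func exampleRunServiceAccountsController(cl client.Interface) {
	options := DefaultServiceAccountsControllerOptions()
	options.Names = util.NewStringSet("default", "builder") // accounts to ensure per namespace
	controller := NewServiceAccountsController(cl, options)
	controller.Run() // starts watch loops in goroutines and returns immediately
	defer controller.Stop()
}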
212
pkg/controller/serviceaccount/serviceaccounts_controller_test.go
Normal file
@@ -0,0 +1,212 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serviceaccount

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

type serverResponse struct {
	statusCode int
	obj        interface{}
}

func makeTestServer(t *testing.T, namespace string, serviceAccountResponse serverResponse) (*httptest.Server, *util.FakeHandler) {
	fakeServiceAccountsHandler := util.FakeHandler{
		StatusCode:   serviceAccountResponse.statusCode,
		ResponseBody: runtime.EncodeOrDie(testapi.Codec(), serviceAccountResponse.obj.(runtime.Object)),
	}

	mux := http.NewServeMux()
	mux.Handle(testapi.ResourcePath("serviceAccounts", namespace, ""), &fakeServiceAccountsHandler)
	mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) {
		t.Errorf("unexpected request: %v", req.RequestURI)
		res.WriteHeader(http.StatusNotFound)
	})
	return httptest.NewServer(mux), &fakeServiceAccountsHandler
}

func TestServiceAccountCreation(t *testing.T) {
	ns := api.NamespaceDefault

	defaultName := "default"
	managedName := "managed"

	activeNS := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: ns},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceActive,
		},
	}
	terminatingNS := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: ns},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceTerminating,
		},
	}
	defaultServiceAccount := &api.ServiceAccount{
		ObjectMeta: api.ObjectMeta{
			Name:            defaultName,
			Namespace:       ns,
			ResourceVersion: "1",
		},
	}
	managedServiceAccount := &api.ServiceAccount{
		ObjectMeta: api.ObjectMeta{
			Name:            managedName,
			Namespace:       ns,
			ResourceVersion: "1",
		},
	}
	unmanagedServiceAccount := &api.ServiceAccount{
		ObjectMeta: api.ObjectMeta{
			Name:            "other-unmanaged",
			Namespace:       ns,
			ResourceVersion: "1",
		},
	}

	testcases := map[string]struct {
		ExistingNamespace       *api.Namespace
		ExistingServiceAccounts []*api.ServiceAccount

		AddedNamespace        *api.Namespace
		UpdatedNamespace      *api.Namespace
		DeletedServiceAccount *api.ServiceAccount

		ExpectCreatedServiceAccounts []string
	}{
		"new active namespace missing serviceaccounts": {
			ExistingServiceAccounts:      []*api.ServiceAccount{},
			AddedNamespace:               activeNS,
			ExpectCreatedServiceAccounts: util.NewStringSet(defaultName, managedName).List(),
		},
		"new active namespace missing serviceaccount": {
			ExistingServiceAccounts:      []*api.ServiceAccount{managedServiceAccount},
			AddedNamespace:               activeNS,
			ExpectCreatedServiceAccounts: []string{defaultName},
		},
		"new active namespace with serviceaccounts": {
			ExistingServiceAccounts:      []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount},
			AddedNamespace:               activeNS,
			ExpectCreatedServiceAccounts: []string{},
		},

		"new terminating namespace": {
			ExistingServiceAccounts:      []*api.ServiceAccount{},
			AddedNamespace:               terminatingNS,
			ExpectCreatedServiceAccounts: []string{},
		},

		"updated active namespace missing serviceaccounts": {
			ExistingServiceAccounts:      []*api.ServiceAccount{},
			UpdatedNamespace:             activeNS,
			ExpectCreatedServiceAccounts: util.NewStringSet(defaultName, managedName).List(),
		},
		"updated active namespace missing serviceaccount": {
			ExistingServiceAccounts:      []*api.ServiceAccount{defaultServiceAccount},
			UpdatedNamespace:             activeNS,
			ExpectCreatedServiceAccounts: []string{managedName},
		},
		"updated active namespace with serviceaccounts": {
			ExistingServiceAccounts:      []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount},
			UpdatedNamespace:             activeNS,
			ExpectCreatedServiceAccounts: []string{},
		},
		"updated terminating namespace": {
			ExistingServiceAccounts:      []*api.ServiceAccount{},
			UpdatedNamespace:             terminatingNS,
			ExpectCreatedServiceAccounts: []string{},
		},

		"deleted serviceaccount without namespace": {
			DeletedServiceAccount:        defaultServiceAccount,
			ExpectCreatedServiceAccounts: []string{},
		},
		"deleted serviceaccount with active namespace": {
			ExistingNamespace:            activeNS,
			DeletedServiceAccount:        defaultServiceAccount,
			ExpectCreatedServiceAccounts: []string{defaultName},
		},
		"deleted serviceaccount with terminating namespace": {
			ExistingNamespace:            terminatingNS,
			DeletedServiceAccount:        defaultServiceAccount,
			ExpectCreatedServiceAccounts: []string{},
		},
		"deleted unmanaged serviceaccount with active namespace": {
			ExistingNamespace:            activeNS,
			DeletedServiceAccount:        unmanagedServiceAccount,
			ExpectCreatedServiceAccounts: []string{},
		},
		"deleted unmanaged serviceaccount with terminating namespace": {
			ExistingNamespace:            terminatingNS,
			DeletedServiceAccount:        unmanagedServiceAccount,
			ExpectCreatedServiceAccounts: []string{},
		},
	}

	for k, tc := range testcases {
		client := testclient.NewSimpleFake(defaultServiceAccount, managedServiceAccount)
		options := DefaultServiceAccountsControllerOptions()
		options.Names = util.NewStringSet(defaultName, managedName)
		controller := NewServiceAccountsController(client, options)

		if tc.ExistingNamespace != nil {
			controller.namespaces.Add(tc.ExistingNamespace)
		}
		for _, s := range tc.ExistingServiceAccounts {
			controller.serviceAccounts.Add(s)
		}

		if tc.AddedNamespace != nil {
			controller.namespaces.Add(tc.AddedNamespace)
			controller.namespaceAdded(tc.AddedNamespace)
		}
		if tc.UpdatedNamespace != nil {
			controller.namespaces.Add(tc.UpdatedNamespace)
			controller.namespaceUpdated(nil, tc.UpdatedNamespace)
		}
		if tc.DeletedServiceAccount != nil {
			controller.serviceAccountDeleted(tc.DeletedServiceAccount)
		}

		actions := client.Actions()
		if len(tc.ExpectCreatedServiceAccounts) != len(actions) {
			t.Errorf("%s: Expected to create accounts %#v. Actual actions were: %#v", k, tc.ExpectCreatedServiceAccounts, actions)
			continue
		}
		for i, expectedName := range tc.ExpectCreatedServiceAccounts {
			action := actions[i]
			if action.Action != "create-serviceaccount" {
				t.Errorf("%s: Unexpected action %s", k, action.Action)
				break
			}
			createdAccount := action.Value.(*api.ServiceAccount)
			if createdAccount.Name != expectedName {
				t.Errorf("%s: Expected %s to be created, got %s", k, expectedName, createdAccount.Name)
			}
		}
	}
}
81
pkg/controller/serviceaccount/tokengetter.go
Normal file
@@ -0,0 +1,81 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serviceaccount

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/secret"
	secretetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/secret/etcd"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/serviceaccount"
	serviceaccountetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/serviceaccount/etcd"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/storage"
)

// ServiceAccountTokenGetter defines functions to retrieve a named service account and secret
type ServiceAccountTokenGetter interface {
	GetServiceAccount(namespace, name string) (*api.ServiceAccount, error)
	GetSecret(namespace, name string) (*api.Secret, error)
}

// clientGetter implements ServiceAccountTokenGetter using a client.Interface
type clientGetter struct {
	client client.Interface
}

// NewGetterFromClient returns a ServiceAccountTokenGetter that
// uses the specified client to retrieve service accounts and secrets.
// The client should NOT authenticate using a service account token
// that the returned getter will be used to retrieve, or infinite recursion will result.
func NewGetterFromClient(c client.Interface) ServiceAccountTokenGetter {
	return clientGetter{c}
}
func (c clientGetter) GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) {
	return c.client.ServiceAccounts(namespace).Get(name)
}
func (c clientGetter) GetSecret(namespace, name string) (*api.Secret, error) {
	return c.client.Secrets(namespace).Get(name)
}

// registryGetter implements ServiceAccountTokenGetter using a service account and secret registry
type registryGetter struct {
	serviceAccounts serviceaccount.Registry
	secrets         secret.Registry
}

// NewGetterFromRegistries returns a ServiceAccountTokenGetter that
// uses the specified registries to retrieve service accounts and secrets.
func NewGetterFromRegistries(serviceAccounts serviceaccount.Registry, secrets secret.Registry) ServiceAccountTokenGetter {
	return &registryGetter{serviceAccounts, secrets}
}
func (r *registryGetter) GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) {
	ctx := api.WithNamespace(api.NewContext(), namespace)
	return r.serviceAccounts.GetServiceAccount(ctx, name)
}
func (r *registryGetter) GetSecret(namespace, name string) (*api.Secret, error) {
	ctx := api.WithNamespace(api.NewContext(), namespace)
	return r.secrets.GetSecret(ctx, name)
}

// NewGetterFromStorageInterface returns a ServiceAccountTokenGetter that
// uses the specified storage to retrieve service accounts and secrets.
func NewGetterFromStorageInterface(storage storage.Interface) ServiceAccountTokenGetter {
	return NewGetterFromRegistries(
		serviceaccount.NewRegistry(serviceaccountetcd.NewStorage(storage)),
		secret.NewRegistry(secretetcd.NewStorage(storage)),
	)
}
504
pkg/controller/serviceaccount/tokens_controller.go
Normal file
@@ -0,0 +1,504 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serviceaccount

import (
	"bytes"
	"fmt"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/secret"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
	"github.com/golang/glog"
)

// TokensControllerOptions contains options for the TokensController
type TokensControllerOptions struct {
	// TokenGenerator is the generator to use to create new tokens
	TokenGenerator TokenGenerator
	// ServiceAccountResync is the time.Duration at which to fully re-list service accounts.
	// If zero, re-list will be delayed as long as possible
	ServiceAccountResync time.Duration
	// SecretResync is the time.Duration at which to fully re-list secrets.
	// If zero, re-list will be delayed as long as possible
	SecretResync time.Duration
	// RootCA is the CA certificate data that will be added to each service account's token secrets
	RootCA []byte
}
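// A minimal sketch of filling in these options; myTokenGenerator, caBytes,
// and kubeClient are hypothetical placeholders, since this commit does not
// show the caller:
//
//	options := TokensControllerOptions{
//		TokenGenerator: myTokenGenerator, // any TokenGenerator implementation
//		RootCA:         caBytes,          // PEM CA data copied into each token secret
//		// ServiceAccountResync/SecretResync left zero: delay re-lists as long as possible
//	}
//	controller := NewTokensController(kubeClient, options)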

// NewTokensController returns a new *TokensController.
func NewTokensController(cl client.Interface, options TokensControllerOptions) *TokensController {
	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
		rootCA: options.RootCA,
	}

	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{client.SecretType: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Secrets(api.NamespaceAll).List(labels.Everything(), tokenSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Secrets(api.NamespaceAll).Watch(labels.Everything(), tokenSelector, rv)
			},
		},
		&api.Secret{},
		options.SecretResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.secretAdded,
			UpdateFunc: e.secretUpdated,
			DeleteFunc: e.secretDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.serviceAccountsSynced = e.serviceAccountController.HasSynced
	e.secretsSynced = e.secretController.HasSynced

	return e
}

// TokensController manages ServiceAccountToken secrets for ServiceAccount objects
type TokensController struct {
	stopChan chan struct{}

	client client.Interface
	token  TokenGenerator

	rootCA []byte

	serviceAccounts cache.Indexer
	secrets         cache.Indexer

	// Since we join two objects, we'll watch both of them with controllers.
	serviceAccountController *framework.Controller
	secretController         *framework.Controller

	// These are here so tests can inject a 'return true'.
	serviceAccountsSynced func() bool
	secretsSynced         func() bool
}

// Run starts the controller loops and returns immediately
func (e *TokensController) Run() {
	if e.stopChan == nil {
		e.stopChan = make(chan struct{})
		go e.serviceAccountController.Run(e.stopChan)
		go e.secretController.Run(e.stopChan)
	}
}

// Stop gracefully shuts down this controller
func (e *TokensController) Stop() {
	if e.stopChan != nil {
		close(e.stopChan)
		e.stopChan = nil
	}
}
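// Usage sketch: Run is a no-op while the loops are already started (stopChan
// is non-nil), and Stop closes the channel so both informer goroutines exit;
// "controller" here is a *TokensController built as in the sketch above:
//
//	controller.Run()        // starts both watch loops, returns immediately
//	controller.Run()        // no-op: stopChan is already non-nil
//	defer controller.Stop() // closes stopChan; both loops exit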

// serviceAccountAdded reacts to a ServiceAccount creation by creating a corresponding ServiceAccountToken Secret
func (e *TokensController) serviceAccountAdded(obj interface{}) {
	serviceAccount := obj.(*api.ServiceAccount)
	err := e.createSecretIfNeeded(serviceAccount)
	if err != nil {
		glog.Error(err)
	}
}

// serviceAccountUpdated reacts to a ServiceAccount update (or re-list) by ensuring a corresponding ServiceAccountToken Secret exists
func (e *TokensController) serviceAccountUpdated(oldObj interface{}, newObj interface{}) {
	newServiceAccount := newObj.(*api.ServiceAccount)
	err := e.createSecretIfNeeded(newServiceAccount)
	if err != nil {
		glog.Error(err)
	}
}

// serviceAccountDeleted reacts to a ServiceAccount deletion by deleting all corresponding ServiceAccountToken Secrets
func (e *TokensController) serviceAccountDeleted(obj interface{}) {
	serviceAccount, ok := obj.(*api.ServiceAccount)
	if !ok {
		// Unknown type. If we missed a ServiceAccount deletion, the
		// corresponding secrets will be cleaned up during the Secret re-list
		return
	}
	secrets, err := e.listTokenSecrets(serviceAccount)
	if err != nil {
		glog.Error(err)
		return
	}
	for _, secret := range secrets {
		glog.V(4).Infof("Deleting secret %s/%s because service account %s was deleted", secret.Namespace, secret.Name, serviceAccount.Name)
		if err := e.deleteSecret(secret); err != nil {
			glog.Errorf("Error deleting secret %s/%s: %v", secret.Namespace, secret.Name, err)
		}
	}
}

// secretAdded reacts to a Secret create by ensuring the referenced ServiceAccount exists, and by adding a token to the secret if needed
func (e *TokensController) secretAdded(obj interface{}) {
	secret := obj.(*api.Secret)
	serviceAccount, err := e.getServiceAccount(secret, true)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount == nil {
		glog.V(2).Infof(
			"Deleting new secret %s/%s because service account %s (uid=%s) was not found",
			secret.Namespace, secret.Name,
			secret.Annotations[api.ServiceAccountNameKey], secret.Annotations[api.ServiceAccountUIDKey])
		if err := e.deleteSecret(secret); err != nil {
			glog.Errorf("Error deleting secret %s/%s: %v", secret.Namespace, secret.Name, err)
		}
	} else {
		e.generateTokenIfNeeded(serviceAccount, secret)
	}
}

// secretUpdated reacts to a Secret update (or re-list) by deleting the secret (if the referenced ServiceAccount does not exist)
func (e *TokensController) secretUpdated(oldObj interface{}, newObj interface{}) {
	newSecret := newObj.(*api.Secret)
	newServiceAccount, err := e.getServiceAccount(newSecret, true)
	if err != nil {
		glog.Error(err)
		return
	}
	if newServiceAccount == nil {
		glog.V(2).Infof(
			"Deleting updated secret %s/%s because service account %s (uid=%s) was not found",
			newSecret.Namespace, newSecret.Name,
			newSecret.Annotations[api.ServiceAccountNameKey], newSecret.Annotations[api.ServiceAccountUIDKey])
		if err := e.deleteSecret(newSecret); err != nil {
			glog.Errorf("Error deleting secret %s/%s: %v", newSecret.Namespace, newSecret.Name, err)
		}
	} else {
		e.generateTokenIfNeeded(newServiceAccount, newSecret)
	}
}

// secretDeleted reacts to a Secret being deleted by removing a reference from the corresponding ServiceAccount if needed
func (e *TokensController) secretDeleted(obj interface{}) {
	secret, ok := obj.(*api.Secret)
	if !ok {
		// Unknown type. If we missed a Secret deletion, the corresponding ServiceAccount (if it exists)
		// will get a secret recreated (if needed) during the ServiceAccount re-list
		return
	}

	serviceAccount, err := e.getServiceAccount(secret, false)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount == nil {
		return
	}

	if _, err := e.removeSecretReferenceIfNeeded(serviceAccount, secret.Name); err != nil {
		glog.Error(err)
	}
}

// createSecretIfNeeded makes sure at least one ServiceAccountToken secret exists, and is included in the serviceAccount's Secrets list
func (e *TokensController) createSecretIfNeeded(serviceAccount *api.ServiceAccount) error {
	// If the service account references no secrets, short-circuit and create a new one
	if len(serviceAccount.Secrets) == 0 {
		return e.createSecret(serviceAccount)
	}

	// We shouldn't try to validate secret references until the secrets store is synced
	if !e.secretsSynced() {
		return nil
	}

	// If any existing token secrets are referenced by the service account, return
	allSecrets, err := e.listTokenSecrets(serviceAccount)
	if err != nil {
		return err
	}
	referencedSecrets := getSecretReferences(serviceAccount)
	for _, secret := range allSecrets {
		if referencedSecrets.Has(secret.Name) {
			return nil
		}
	}

	// Otherwise create a new token secret
	return e.createSecret(serviceAccount)
}

// createSecret creates a secret of type ServiceAccountToken for the given ServiceAccount
func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) error {
	// Build the secret
	secret := &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      secret.Strategy.GenerateName(fmt.Sprintf("%s-token-", serviceAccount.Name)),
			Namespace: serviceAccount.Namespace,
			Annotations: map[string]string{
				api.ServiceAccountNameKey: serviceAccount.Name,
				api.ServiceAccountUIDKey:  string(serviceAccount.UID),
			},
		},
		Type: api.SecretTypeServiceAccountToken,
		Data: map[string][]byte{},
	}

	// Generate the token
	token, err := e.token.GenerateToken(*serviceAccount, *secret)
	if err != nil {
		return err
	}
	secret.Data[api.ServiceAccountTokenKey] = []byte(token)
	if len(e.rootCA) > 0 {
		secret.Data[api.ServiceAccountRootCAKey] = e.rootCA
	}

	// Save the secret
	if _, err := e.client.Secrets(serviceAccount.Namespace).Create(secret); err != nil {
		return err
	}

	// We don't want to update the cache's copy of the service account,
	// so add the secret to a freshly retrieved copy of the service account
	serviceAccounts := e.client.ServiceAccounts(serviceAccount.Namespace)
	serviceAccount, err = serviceAccounts.Get(serviceAccount.Name)
	if err != nil {
		return err
	}
	serviceAccount.Secrets = append(serviceAccount.Secrets, api.ObjectReference{Name: secret.Name})

	_, err = serviceAccounts.Update(serviceAccount)
	if err != nil {
		// We weren't able to use the token; try to clean it up.
		glog.V(2).Infof("Deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
		if err := e.client.Secrets(secret.Namespace).Delete(secret.Name); err != nil {
			glog.Error(err) // if we fail, just log it
		}
	}
	if apierrors.IsConflict(err) {
		// Nothing to do: a conflict means the service account was updated concurrently.
		// We simply return, because we'll get an update notification later and can retry then.
		return nil
	}

	return err
}

// generateTokenIfNeeded populates the token data for the given Secret if not already set
func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAccount, secret *api.Secret) error {
	if secret.Annotations == nil {
		secret.Annotations = map[string]string{}
	}
	if secret.Data == nil {
		secret.Data = map[string][]byte{}
	}

	caData := secret.Data[api.ServiceAccountRootCAKey]
	needsCA := len(e.rootCA) > 0 && !bytes.Equal(caData, e.rootCA)

	tokenData := secret.Data[api.ServiceAccountTokenKey]
	needsToken := len(tokenData) == 0

	if !needsCA && !needsToken {
		return nil
	}

	// Set the CA
	if needsCA {
		secret.Data[api.ServiceAccountRootCAKey] = e.rootCA
	}

	// Generate the token
	if needsToken {
		token, err := e.token.GenerateToken(*serviceAccount, *secret)
		if err != nil {
			return err
		}
		secret.Data[api.ServiceAccountTokenKey] = []byte(token)
	}

	// Set annotations
	secret.Annotations[api.ServiceAccountNameKey] = serviceAccount.Name
	secret.Annotations[api.ServiceAccountUIDKey] = string(serviceAccount.UID)

	// Save the secret
	if _, err := e.client.Secrets(secret.Namespace).Update(secret); err != nil {
		return err
	}
	return nil
}
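// The needsCA/needsToken decision above can be exercised in isolation. A
// sketch with illustrative names ("ca.crt" and "token" match the data keys
// used by the test fixtures below):
//
//	func needsUpdate(data map[string][]byte, rootCA []byte) (needsCA, needsToken bool) {
//		needsCA = len(rootCA) > 0 && !bytes.Equal(data["ca.crt"], rootCA)
//		needsToken = len(data["token"]) == 0
//		return
//	}
//
//	// needsUpdate(map[string][]byte{"token": []byte("ABC")}, []byte("CA Data"))
//	// => needsCA=true (ca.crt missing), needsToken=false (token present)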

// deleteSecret deletes the given secret
func (e *TokensController) deleteSecret(secret *api.Secret) error {
	return e.client.Secrets(secret.Namespace).Delete(secret.Name)
}

// removeSecretReferenceIfNeeded updates the given ServiceAccount to remove a reference to the given secretName if needed.
// Returns whether an update was performed, and any error that occurred
func (e *TokensController) removeSecretReferenceIfNeeded(serviceAccount *api.ServiceAccount, secretName string) (bool, error) {
	// See if the account even referenced the secret
	if !getSecretReferences(serviceAccount).Has(secretName) {
		return false, nil
	}

	// We don't want to update the cache's copy of the service account,
	// so remove the secret from a freshly retrieved copy of the service account
	serviceAccounts := e.client.ServiceAccounts(serviceAccount.Namespace)
	serviceAccount, err := serviceAccounts.Get(serviceAccount.Name)
	if err != nil {
		return false, err
	}

	// Double-check to see if the account still references the secret
	if !getSecretReferences(serviceAccount).Has(secretName) {
		return false, nil
	}

	secrets := []api.ObjectReference{}
	for _, s := range serviceAccount.Secrets {
		if s.Name != secretName {
			secrets = append(secrets, s)
		}
	}
	serviceAccount.Secrets = secrets

	_, err = serviceAccounts.Update(serviceAccount)
	if err != nil {
		return false, err
	}

	return true, nil
}

// getServiceAccount returns the ServiceAccount referenced by the given secret. If the secret is not
// of type ServiceAccountToken, or if the referenced ServiceAccount does not exist, nil is returned
func (e *TokensController) getServiceAccount(secret *api.Secret, fetchOnCacheMiss bool) (*api.ServiceAccount, error) {
	name, uid := serviceAccountNameAndUID(secret)
	if len(name) == 0 {
		return nil, nil
	}

	key := &api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: secret.Namespace}}
	namespaceAccounts, err := e.serviceAccounts.Index("namespace", key)
	if err != nil {
		return nil, err
	}

	for _, obj := range namespaceAccounts {
		serviceAccount := obj.(*api.ServiceAccount)
		if name != serviceAccount.Name {
			// Name must match
			continue
		}
		if len(uid) > 0 && uid != string(serviceAccount.UID) {
			// If UID is specified, it must match
			continue
		}
		return serviceAccount, nil
	}

	if fetchOnCacheMiss {
		serviceAccount, err := e.client.ServiceAccounts(secret.Namespace).Get(name)
		if apierrors.IsNotFound(err) {
			return nil, nil
		}
		if err != nil {
			return nil, err
		}
		if len(uid) > 0 && uid != string(serviceAccount.UID) {
			// If UID is specified, it must match
			return nil, nil
		}
		return serviceAccount, nil
	}

	return nil, nil
}

// listTokenSecrets returns a list of all of the ServiceAccountToken secrets that
// reference the given service account's name and uid
func (e *TokensController) listTokenSecrets(serviceAccount *api.ServiceAccount) ([]*api.Secret, error) {
	key := &api.Secret{ObjectMeta: api.ObjectMeta{Namespace: serviceAccount.Namespace}}
	namespaceSecrets, err := e.secrets.Index("namespace", key)
	if err != nil {
		return nil, err
	}

	items := []*api.Secret{}
	for _, obj := range namespaceSecrets {
		secret := obj.(*api.Secret)
		name, uid := serviceAccountNameAndUID(secret)
		if name != serviceAccount.Name {
			// Name must match
			continue
		}
		if len(uid) > 0 && uid != string(serviceAccount.UID) {
			// If UID is specified, it must match
			continue
		}
		items = append(items, secret)
	}
	return items, nil
}

// serviceAccountNameAndUID is a helper method to get the ServiceAccount Name and UID from the given secret
// Returns "","" if the secret is not a ServiceAccountToken secret
// If the name or uid annotation is missing, "" is returned instead
func serviceAccountNameAndUID(secret *api.Secret) (string, string) {
	if secret.Type != api.SecretTypeServiceAccountToken {
		return "", ""
	}
	return secret.Annotations[api.ServiceAccountNameKey], secret.Annotations[api.ServiceAccountUIDKey]
}

func getSecretReferences(serviceAccount *api.ServiceAccount) util.StringSet {
	references := util.NewStringSet()
	for _, secret := range serviceAccount.Secrets {
		references.Insert(secret.Name)
	}
	return references
}
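Both getServiceAccount and listTokenSecrets apply the same filter: the name annotation must match, and the uid annotation must match only when the secret actually specifies one. A standalone sketch of that rule (illustrative types and values, runnable on its own):

package main

import "fmt"

type ref struct{ name, uid string }

// matches mirrors the filter above: name must match, and uid must match
// only when the secret specifies one.
func matches(secretName, secretUID string, sa ref) bool {
	if secretName != sa.name {
		return false
	}
	if len(secretUID) > 0 && secretUID != sa.uid {
		return false
	}
	return true
}

func main() {
	sa := ref{name: "default", uid: "12345"}
	fmt.Println(matches("default", "", sa))      // true: no uid specified
	fmt.Println(matches("default", "12345", sa)) // true: uid matches
	fmt.Println(matches("default", "99999", sa)) // false: uid mismatch
}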
487
pkg/controller/serviceaccount/tokens_controller_test.go
Normal file
@@ -0,0 +1,487 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serviceaccount

import (
	"reflect"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	utilrand "github.com/GoogleCloudPlatform/kubernetes/pkg/util/rand"
)

type testGenerator struct {
	GeneratedServiceAccounts []api.ServiceAccount
	GeneratedSecrets         []api.Secret
	Token                    string
	Err                      error
}

func (t *testGenerator) GenerateToken(serviceAccount api.ServiceAccount, secret api.Secret) (string, error) {
	t.GeneratedSecrets = append(t.GeneratedSecrets, secret)
	t.GeneratedServiceAccounts = append(t.GeneratedServiceAccounts, serviceAccount)
	return t.Token, t.Err
}

// emptySecretReferences is used by a service account without any secrets
func emptySecretReferences() []api.ObjectReference {
	return []api.ObjectReference{}
}

// missingSecretReferences is used by a service account that references secrets which do not exist
func missingSecretReferences() []api.ObjectReference {
	return []api.ObjectReference{{Name: "missing-secret-1"}}
}

// regularSecretReferences is used by a service account that references secrets which are not ServiceAccountTokens
func regularSecretReferences() []api.ObjectReference {
	return []api.ObjectReference{{Name: "regular-secret-1"}}
}

// tokenSecretReferences is used by a service account that references a ServiceAccountToken secret
func tokenSecretReferences() []api.ObjectReference {
	return []api.ObjectReference{{Name: "token-secret-1"}}
}

// addTokenSecretReference adds a reference to the ServiceAccountToken that will be created
func addTokenSecretReference(refs []api.ObjectReference) []api.ObjectReference {
	return append(refs, api.ObjectReference{Name: "default-token-fplln"})
}

// serviceAccount returns a service account with the given secret refs
func serviceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount {
	return &api.ServiceAccount{
		ObjectMeta: api.ObjectMeta{
			Name:            "default",
			UID:             "12345",
			Namespace:       "default",
			ResourceVersion: "1",
		},
		Secrets: secretRefs,
	}
}

// opaqueSecret returns a persisted non-ServiceAccountToken secret named "regular-secret-1"
func opaqueSecret() *api.Secret {
	return &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:            "regular-secret-1",
			Namespace:       "default",
			UID:             "23456",
			ResourceVersion: "1",
		},
		Type: "Opaque",
		Data: map[string][]byte{
			"mykey": []byte("mydata"),
		},
	}
}

// createdTokenSecret returns the ServiceAccountToken secret posted when creating a new token secret.
// Named "default-token-fplln", since that is the first generated name after rand.Seed(1)
func createdTokenSecret() *api.Secret {
	return &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      "default-token-fplln",
			Namespace: "default",
			Annotations: map[string]string{
				api.ServiceAccountNameKey: "default",
				api.ServiceAccountUIDKey:  "12345",
			},
		},
		Type: api.SecretTypeServiceAccountToken,
		Data: map[string][]byte{
			"token":  []byte("ABC"),
			"ca.crt": []byte("CA Data"),
		},
	}
}

// serviceAccountTokenSecret returns an existing ServiceAccountToken secret named "token-secret-1"
func serviceAccountTokenSecret() *api.Secret {
	return &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:            "token-secret-1",
			Namespace:       "default",
			UID:             "23456",
			ResourceVersion: "1",
			Annotations: map[string]string{
				api.ServiceAccountNameKey: "default",
				api.ServiceAccountUIDKey:  "12345",
			},
		},
		Type: api.SecretTypeServiceAccountToken,
		Data: map[string][]byte{
			"token":  []byte("ABC"),
			"ca.crt": []byte("CA Data"),
		},
	}
}

// serviceAccountTokenSecretWithoutTokenData returns an existing ServiceAccountToken secret that lacks token data
func serviceAccountTokenSecretWithoutTokenData() *api.Secret {
	secret := serviceAccountTokenSecret()
	delete(secret.Data, api.ServiceAccountTokenKey)
	return secret
}

// serviceAccountTokenSecretWithoutCAData returns an existing ServiceAccountToken secret that lacks ca data
func serviceAccountTokenSecretWithoutCAData() *api.Secret {
	secret := serviceAccountTokenSecret()
	delete(secret.Data, api.ServiceAccountRootCAKey)
	return secret
}

// serviceAccountTokenSecretWithCAData returns an existing ServiceAccountToken secret with the specified ca data
func serviceAccountTokenSecretWithCAData(data []byte) *api.Secret {
	secret := serviceAccountTokenSecret()
	secret.Data[api.ServiceAccountRootCAKey] = data
	return secret
}

func TestTokenCreation(t *testing.T) {
	testcases := map[string]struct {
		ClientObjects []runtime.Object

		SecretsSyncPending         bool
		ServiceAccountsSyncPending bool

		ExistingServiceAccount *api.ServiceAccount
		ExistingSecrets        []*api.Secret

		AddedServiceAccount   *api.ServiceAccount
		UpdatedServiceAccount *api.ServiceAccount
		DeletedServiceAccount *api.ServiceAccount
		AddedSecret           *api.Secret
		UpdatedSecret         *api.Secret
		DeletedSecret         *api.Secret

		ExpectedActions []testclient.FakeAction
	}{
		"new serviceaccount with no secrets": {
			ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()},

			AddedServiceAccount: serviceAccount(emptySecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "create-secret", Value: createdTokenSecret()},
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(addTokenSecretReference(emptySecretReferences()))},
			},
		},
		"new serviceaccount with no secrets with unsynced secret store": {
			ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()},

			SecretsSyncPending: true,

			AddedServiceAccount: serviceAccount(emptySecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "create-secret", Value: createdTokenSecret()},
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(addTokenSecretReference(emptySecretReferences()))},
			},
		},
		"new serviceaccount with missing secrets": {
			ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()},

			AddedServiceAccount: serviceAccount(missingSecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "create-secret", Value: createdTokenSecret()},
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(addTokenSecretReference(missingSecretReferences()))},
			},
		},
		"new serviceaccount with missing secrets with unsynced secret store": {
			ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()},

			SecretsSyncPending: true,

			AddedServiceAccount: serviceAccount(missingSecretReferences()),
			ExpectedActions:     []testclient.FakeAction{},
		},
		"new serviceaccount with non-token secrets": {
			ClientObjects: []runtime.Object{serviceAccount(regularSecretReferences()), createdTokenSecret(), opaqueSecret()},

			AddedServiceAccount: serviceAccount(regularSecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "create-secret", Value: createdTokenSecret()},
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(addTokenSecretReference(regularSecretReferences()))},
			},
		},
		"new serviceaccount with token secrets": {
			ClientObjects:   []runtime.Object{serviceAccount(tokenSecretReferences()), serviceAccountTokenSecret()},
			ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},

			AddedServiceAccount: serviceAccount(tokenSecretReferences()),
			ExpectedActions:     []testclient.FakeAction{},
		},

		"updated serviceaccount with no secrets": {
			ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()},

			UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "create-secret", Value: createdTokenSecret()},
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(addTokenSecretReference(emptySecretReferences()))},
			},
		},
		"updated serviceaccount with no secrets with unsynced secret store": {
			ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()},

			SecretsSyncPending: true,

			UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "create-secret", Value: createdTokenSecret()},
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(addTokenSecretReference(emptySecretReferences()))},
			},
		},
		"updated serviceaccount with missing secrets": {
			ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()},

			UpdatedServiceAccount: serviceAccount(missingSecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "create-secret", Value: createdTokenSecret()},
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(addTokenSecretReference(missingSecretReferences()))},
			},
		},
		"updated serviceaccount with missing secrets with unsynced secret store": {
			ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()},

			SecretsSyncPending: true,

			UpdatedServiceAccount: serviceAccount(missingSecretReferences()),
			ExpectedActions:       []testclient.FakeAction{},
		},
		"updated serviceaccount with non-token secrets": {
			ClientObjects: []runtime.Object{serviceAccount(regularSecretReferences()), createdTokenSecret(), opaqueSecret()},

			UpdatedServiceAccount: serviceAccount(regularSecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "create-secret", Value: createdTokenSecret()},
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(addTokenSecretReference(regularSecretReferences()))},
			},
		},
		"updated serviceaccount with token secrets": {
			ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},

			UpdatedServiceAccount: serviceAccount(tokenSecretReferences()),
			ExpectedActions:       []testclient.FakeAction{},
		},

		"deleted serviceaccount with no secrets": {
			DeletedServiceAccount: serviceAccount(emptySecretReferences()),
			ExpectedActions:       []testclient.FakeAction{},
		},
		"deleted serviceaccount with missing secrets": {
			DeletedServiceAccount: serviceAccount(missingSecretReferences()),
			ExpectedActions:       []testclient.FakeAction{},
		},
		"deleted serviceaccount with non-token secrets": {
			ClientObjects: []runtime.Object{opaqueSecret()},

			DeletedServiceAccount: serviceAccount(regularSecretReferences()),
			ExpectedActions:       []testclient.FakeAction{},
		},
		"deleted serviceaccount with token secrets": {
			ClientObjects:   []runtime.Object{serviceAccountTokenSecret()},
			ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},

			DeletedServiceAccount: serviceAccount(tokenSecretReferences()),
			ExpectedActions: []testclient.FakeAction{
				{Action: "delete-secret", Value: "token-secret-1"},
			},
		},

		"added secret without serviceaccount": {
			ClientObjects: []runtime.Object{serviceAccountTokenSecret()},

			AddedSecret: serviceAccountTokenSecret(),
			ExpectedActions: []testclient.FakeAction{
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "delete-secret", Value: "token-secret-1"},
			},
		},
		"added secret with serviceaccount": {
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			AddedSecret:     serviceAccountTokenSecret(),
			ExpectedActions: []testclient.FakeAction{},
		},
		"added token secret without token data": {
			ClientObjects:          []runtime.Object{serviceAccountTokenSecretWithoutTokenData()},
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			AddedSecret: serviceAccountTokenSecretWithoutTokenData(),
			ExpectedActions: []testclient.FakeAction{
				{Action: "update-secret", Value: serviceAccountTokenSecret()},
			},
		},
		"added token secret without ca data": {
			ClientObjects:          []runtime.Object{serviceAccountTokenSecretWithoutCAData()},
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			AddedSecret: serviceAccountTokenSecretWithoutCAData(),
			ExpectedActions: []testclient.FakeAction{
				{Action: "update-secret", Value: serviceAccountTokenSecret()},
			},
		},
		"added token secret with mismatched ca data": {
			ClientObjects:          []runtime.Object{serviceAccountTokenSecretWithCAData([]byte("mismatched"))},
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			AddedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")),
			ExpectedActions: []testclient.FakeAction{
				{Action: "update-secret", Value: serviceAccountTokenSecret()},
			},
		},

		"updated secret without serviceaccount": {
			ClientObjects: []runtime.Object{serviceAccountTokenSecret()},

			UpdatedSecret: serviceAccountTokenSecret(),
			ExpectedActions: []testclient.FakeAction{
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "delete-secret", Value: "token-secret-1"},
			},
		},
		"updated secret with serviceaccount": {
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			UpdatedSecret:   serviceAccountTokenSecret(),
			ExpectedActions: []testclient.FakeAction{},
		},
		"updated token secret without token data": {
			ClientObjects:          []runtime.Object{serviceAccountTokenSecretWithoutTokenData()},
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			UpdatedSecret: serviceAccountTokenSecretWithoutTokenData(),
			ExpectedActions: []testclient.FakeAction{
				{Action: "update-secret", Value: serviceAccountTokenSecret()},
			},
		},
		"updated token secret without ca data": {
			ClientObjects:          []runtime.Object{serviceAccountTokenSecretWithoutCAData()},
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			UpdatedSecret: serviceAccountTokenSecretWithoutCAData(),
			ExpectedActions: []testclient.FakeAction{
				{Action: "update-secret", Value: serviceAccountTokenSecret()},
			},
		},
		"updated token secret with mismatched ca data": {
			ClientObjects:          []runtime.Object{serviceAccountTokenSecretWithCAData([]byte("mismatched"))},
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			UpdatedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")),
			ExpectedActions: []testclient.FakeAction{
				{Action: "update-secret", Value: serviceAccountTokenSecret()},
			},
		},

		"deleted secret without serviceaccount": {
			DeletedSecret:   serviceAccountTokenSecret(),
			ExpectedActions: []testclient.FakeAction{},
		},
		"deleted secret with serviceaccount with reference": {
			ClientObjects:          []runtime.Object{serviceAccount(tokenSecretReferences())},
			ExistingServiceAccount: serviceAccount(tokenSecretReferences()),

			DeletedSecret: serviceAccountTokenSecret(),
			ExpectedActions: []testclient.FakeAction{
				{Action: "get-serviceaccount", Value: "default"},
				{Action: "update-serviceaccount", Value: serviceAccount(emptySecretReferences())},
			},
		},
		"deleted secret with serviceaccount without reference": {
			ExistingServiceAccount: serviceAccount(emptySecretReferences()),

			DeletedSecret:   serviceAccountTokenSecret(),
			ExpectedActions: []testclient.FakeAction{},
		},
	}

	for k, tc := range testcases {

		// Re-seed to reset name generation
		utilrand.Seed(1)

		generator := &testGenerator{Token: "ABC"}

		client := testclient.NewSimpleFake(tc.ClientObjects...)

		controller := NewTokensController(client, TokensControllerOptions{TokenGenerator: generator, RootCA: []byte("CA Data")})

		// Tell the token controller whether its stores have been synced
		controller.serviceAccountsSynced = func() bool { return !tc.ServiceAccountsSyncPending }
		controller.secretsSynced = func() bool { return !tc.SecretsSyncPending }

		if tc.ExistingServiceAccount != nil {
			controller.serviceAccounts.Add(tc.ExistingServiceAccount)
		}
		for _, s := range tc.ExistingSecrets {
			controller.secrets.Add(s)
		}

		if tc.AddedServiceAccount != nil {
			controller.serviceAccountAdded(tc.AddedServiceAccount)
		}
		if tc.UpdatedServiceAccount != nil {
			controller.serviceAccountUpdated(nil, tc.UpdatedServiceAccount)
		}
		if tc.DeletedServiceAccount != nil {
			controller.serviceAccountDeleted(tc.DeletedServiceAccount)
		}
		if tc.AddedSecret != nil {
			controller.secretAdded(tc.AddedSecret)
		}
		if tc.UpdatedSecret != nil {
			controller.secretUpdated(nil, tc.UpdatedSecret)
		}
		if tc.DeletedSecret != nil {
			controller.secretDeleted(tc.DeletedSecret)
		}

		actions := client.Actions()
		for i, action := range actions {
			if len(tc.ExpectedActions) < i+1 {
				t.Errorf("%s: %d unexpected actions: %+v", k, len(actions)-len(tc.ExpectedActions), actions[i:])
				break
			}

			expectedAction := tc.ExpectedActions[i]
			if expectedAction.Action != action.Action {
				t.Errorf("%s: Expected %s, got %s", k, expectedAction.Action, action.Action)
				continue
			}
			if !reflect.DeepEqual(expectedAction.Value, action.Value) {
				t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, expectedAction.Value, action.Value)
				continue
			}
		}

		if len(tc.ExpectedActions) > len(actions) {
			t.Errorf("%s: %d additional expected actions:%+v", k, len(tc.ExpectedActions)-len(actions), tc.ExpectedActions[len(actions):])
		}
	}
}
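These fixtures rely on deterministic name generation: re-seeding the shared rand source before each case makes the first generated name come out as "default-token-fplln" every time. A standalone sketch of the same idea using the standard library (the alphabet and suffix length are illustrative, not the repo's util/rand implementation):

package main

import (
	"fmt"
	"math/rand"
)

const alphanums = "bcdfghjklmnpqrstvwxz0123456789" // illustrative alphabet only

// suffix draws five pseudo-random characters, the way a GenerateName-style
// helper might; with a fixed seed the result is reproducible across runs.
func suffix(r *rand.Rand) string {
	b := make([]byte, 5)
	for i := range b {
		b[i] = alphanums[r.Intn(len(alphanums))]
	}
	return string(b)
}

func main() {
	r := rand.New(rand.NewSource(1)) // re-seed per test case, as the loop above does
	fmt.Println("default-token-" + suffix(r))
	r = rand.New(rand.NewSource(1))
	fmt.Println("default-token-" + suffix(r)) // identical: same seed, same name
}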