Support scale subresource for PDBs (#76294)

* Support scale subresource for PDBs

* Check group in finder functions

* Small fixes and more tests
Author: Morten Torkildsen
Date: 2019-05-23 22:24:18 -07:00
Committed by: Kubernetes Prow Robot
Parent: cdff17a96b
Commit: f1883c9e8c

16 changed files with 808 additions and 50 deletions


@@ -19,7 +19,9 @@ go_library(
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
@@ -34,6 +36,7 @@ go_library(
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
@@ -49,13 +52,20 @@ go_test(
"//pkg/apis/core/install:go_default_library",
"//pkg/controller:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/scale/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",


@@ -26,7 +26,9 @@ import (
policy "k8s.io/api/policy/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -41,6 +43,7 @@ import (
appsv1listers "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
policylisters "k8s.io/client-go/listers/policy/v1beta1"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
@@ -67,6 +70,9 @@ type updater func(*policy.PodDisruptionBudget) error
type DisruptionController struct {
kubeClient clientset.Interface
+mapper apimeta.RESTMapper
+scaleNamespacer scaleclient.ScalesGetter
pdbLister policylisters.PodDisruptionBudgetLister
pdbListerSynced cache.InformerSynced
@@ -105,7 +111,7 @@ type controllerAndScale struct {
// podControllerFinder is a function type that maps a pod to a list of
// controllers and their scale.
-type podControllerFinder func(*v1.Pod) (*controllerAndScale, error)
+type podControllerFinder func(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error)
func NewDisruptionController(
podInformer coreinformers.PodInformer,
@@ -115,6 +121,8 @@ func NewDisruptionController(
dInformer appsv1informers.DeploymentInformer,
ssInformer appsv1informers.StatefulSetInformer,
kubeClient clientset.Interface,
+restMapper apimeta.RESTMapper,
+scaleNamespacer scaleclient.ScalesGetter,
) *DisruptionController {
dc := &DisruptionController{
kubeClient: kubeClient,
@@ -157,19 +165,19 @@ func NewDisruptionController(
dc.ssLister = ssInformer.Lister()
dc.ssListerSynced = ssInformer.Informer().HasSynced
+dc.mapper = restMapper
+dc.scaleNamespacer = scaleNamespacer
return dc
}
-// TODO(mml): When controllerRef is implemented (#2210), we *could* simply
-// return controllers without their scales, and access scale type-generically
-// via the scale subresource. That may not be as much of a win as it sounds,
-// however. We are accessing everything through the pkg/client/cache API that
-// we have to set up and tune to the types we know we'll be accessing anyway,
-// and we may well need further tweaks just to be able to access scale
-// subresources.
+// The workload resources do implement the scale subresource, so it would
+// be possible to only check the scale subresource here. But since there is no
+// way to take advantage of listers with scale subresources, we use the workload
+// resources directly and only fall back to the scale subresource when needed.
func (dc *DisruptionController) finders() []podControllerFinder {
return []podControllerFinder{dc.getPodReplicationController, dc.getPodDeployment, dc.getPodReplicaSet,
-dc.getPodStatefulSet}
+dc.getPodStatefulSet, dc.getScaleController}
}
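Note the ordering in finders(): the lister-backed workload finders run first and getScaleController runs last, so a live request against the /scale subresource is only made for owner references that none of the built-in finders claim (the loop in getExpectedScale below takes the first finder that returns a result).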
var (
@@ -180,15 +188,12 @@ var (
)
// getPodReplicaSet finds a replicaset which has no matching deployments.
-func (dc *DisruptionController) getPodReplicaSet(pod *v1.Pod) (*controllerAndScale, error) {
-controllerRef := metav1.GetControllerOf(pod)
-if controllerRef == nil {
-return nil, nil
+func (dc *DisruptionController) getPodReplicaSet(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+ok, err := verifyGroupKind(controllerRef, controllerKindRS.Kind, []string{"apps", "extensions"})
+if !ok || err != nil {
+return nil, err
}
-if controllerRef.Kind != controllerKindRS.Kind {
-return nil, nil
-}
-rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
+rs, err := dc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
@@ -204,16 +209,13 @@ func (dc *DisruptionController) getPodReplicaSet(pod *v1.Pod) (*controllerAndSca
return &controllerAndScale{rs.UID, *(rs.Spec.Replicas)}, nil
}
-// getPodStatefulSet returns the statefulset managing the given pod.
-func (dc *DisruptionController) getPodStatefulSet(pod *v1.Pod) (*controllerAndScale, error) {
-controllerRef := metav1.GetControllerOf(pod)
-if controllerRef == nil {
-return nil, nil
+// getPodStatefulSet returns the statefulset referenced by the provided controllerRef.
+func (dc *DisruptionController) getPodStatefulSet(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+ok, err := verifyGroupKind(controllerRef, controllerKindSS.Kind, []string{"apps"})
+if !ok || err != nil {
+return nil, err
}
-if controllerRef.Kind != controllerKindSS.Kind {
-return nil, nil
-}
-ss, err := dc.ssLister.StatefulSets(pod.Namespace).Get(controllerRef.Name)
+ss, err := dc.ssLister.StatefulSets(namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
@@ -226,15 +228,12 @@ func (dc *DisruptionController) getPodStatefulSet(pod *v1.Pod) (*controllerAndSc
}
// getPodDeployment finds deployments for any replicasets which are being managed by deployments.
-func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndScale, error) {
-controllerRef := metav1.GetControllerOf(pod)
-if controllerRef == nil {
-return nil, nil
+func (dc *DisruptionController) getPodDeployment(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+ok, err := verifyGroupKind(controllerRef, controllerKindRS.Kind, []string{"apps", "extensions"})
+if !ok || err != nil {
+return nil, err
}
-if controllerRef.Kind != controllerKindRS.Kind {
-return nil, nil
-}
-rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
+rs, err := dc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
@@ -246,8 +245,10 @@ func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndSca
if controllerRef == nil {
return nil, nil
}
-if controllerRef.Kind != controllerKindDep.Kind {
-return nil, nil
+ok, err = verifyGroupKind(controllerRef, controllerKindDep.Kind, []string{"apps", "extensions"})
+if !ok || err != nil {
+return nil, err
}
deployment, err := dc.dLister.Deployments(rs.Namespace).Get(controllerRef.Name)
if err != nil {
@@ -260,15 +261,12 @@ func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndSca
return &controllerAndScale{deployment.UID, *(deployment.Spec.Replicas)}, nil
}
-func (dc *DisruptionController) getPodReplicationController(pod *v1.Pod) (*controllerAndScale, error) {
-controllerRef := metav1.GetControllerOf(pod)
-if controllerRef == nil {
-return nil, nil
+func (dc *DisruptionController) getPodReplicationController(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+ok, err := verifyGroupKind(controllerRef, controllerKindRC.Kind, []string{""})
+if !ok || err != nil {
+return nil, err
}
-if controllerRef.Kind != controllerKindRC.Kind {
-return nil, nil
-}
-rc, err := dc.rcLister.ReplicationControllers(pod.Namespace).Get(controllerRef.Name)
+rc, err := dc.rcLister.ReplicationControllers(namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
@@ -279,6 +277,55 @@ func (dc *DisruptionController) getPodReplicationController(pod *v1.Pod) (*contr
return &controllerAndScale{rc.UID, *(rc.Spec.Replicas)}, nil
}
+func (dc *DisruptionController) getScaleController(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+gv, err := schema.ParseGroupVersion(controllerRef.APIVersion)
+if err != nil {
+return nil, err
+}
+gk := schema.GroupKind{
+Group: gv.Group,
+Kind: controllerRef.Kind,
+}
+mapping, err := dc.mapper.RESTMapping(gk, gv.Version)
+if err != nil {
+return nil, err
+}
+gr := mapping.Resource.GroupResource()
+scale, err := dc.scaleNamespacer.Scales(namespace).Get(gr, controllerRef.Name)
+if err != nil {
+if errors.IsNotFound(err) {
+return nil, nil
+}
+return nil, err
+}
+if scale.UID != controllerRef.UID {
+return nil, nil
+}
+return &controllerAndScale{scale.UID, scale.Spec.Replicas}, nil
+}
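To make the resolution path concrete, a hypothetical walk-through of the steps above (the group custom.k8s.io and the name my-workload are illustrative, not part of this change):

ref := &metav1.OwnerReference{
APIVersion: "custom.k8s.io/v1", // hypothetical CRD group/version
Kind: "customresource",
Name: "my-workload",
}
gv, _ := schema.ParseGroupVersion(ref.APIVersion) // {Group: "custom.k8s.io", Version: "v1"}
mapping, _ := dc.mapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: ref.Kind}, gv.Version)
gr := mapping.Resource.GroupResource() // {Group: "custom.k8s.io", Resource: "customresources"}
scale, _ := dc.scaleNamespacer.Scales("default").Get(gr, ref.Name)
// scale.Spec.Replicas is the desired scale; the UID comparison above guards
// against a stale reference to a deleted-and-recreated controller.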
+func verifyGroupKind(controllerRef *metav1.OwnerReference, expectedKind string, expectedGroups []string) (bool, error) {
+gv, err := schema.ParseGroupVersion(controllerRef.APIVersion)
+if err != nil {
+return false, err
+}
+if controllerRef.Kind != expectedKind {
+return false, nil
+}
+for _, group := range expectedGroups {
+if group == gv.Group {
+return true, nil
+}
+}
+return false, nil
+}
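verifyGroupKind deliberately compares only the group and kind, never the version, so a legacy apiVersion still matches (an illustrative call, not part of the change):

ref := &metav1.OwnerReference{APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"}
ok, _ := verifyGroupKind(ref, "ReplicaSet", []string{"apps", "extensions"})
// ok == true: the "extensions" group is accepted for any version. A group or
// kind mismatch yields (false, nil), which makes the calling finder return
// (nil, nil) so the next finder in the chain gets a chance.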
func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()
@@ -583,10 +630,23 @@ func (dc *DisruptionController) getExpectedScale(pdb *policy.PodDisruptionBudget
// 1. Find the controller for each pod. If any pod has 0 controllers,
// that's an error. With ControllerRef, a pod can only have 1 controller.
for _, pod := range pods {
+controllerRef := metav1.GetControllerOf(pod)
+if controllerRef == nil {
+err = fmt.Errorf("found no controller ref for pod %q", pod.Name)
+dc.recorder.Event(pdb, v1.EventTypeWarning, "NoControllerRef", err.Error())
+return
+}
+// If we already know the scale of the controller there is no need to do anything.
+if _, found := controllerScale[controllerRef.UID]; found {
+continue
+}
+// Check all the supported controllers to find the desired scale.
foundController := false
for _, finder := range dc.finders() {
var controllerNScale *controllerAndScale
-controllerNScale, err = finder(pod)
+controllerNScale, err = finder(controllerRef, pod.Namespace)
if err != nil {
return
}


@@ -23,13 +23,20 @@ import (
"time"
apps "k8s.io/api/apps/v1"
autoscalingapi "k8s.io/api/autoscaling/v1"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/informers"
scalefake "k8s.io/client-go/scale/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
_ "k8s.io/kubernetes/pkg/apis/core/install"
@@ -90,6 +97,14 @@ type disruptionController struct {
rsStore cache.Store
dStore cache.Store
ssStore cache.Store
+scaleClient *scalefake.FakeScaleClient
}
+var customGVK = schema.GroupVersionKind{
+Group: "custom.k8s.io",
+Version: "v1",
+Kind: "customresource",
+}
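customGVK stands in for a CRD-backed workload the controller has no lister for; registering it in the scheme against an arbitrary known Go type (v1.Service, below) is just a convenient way to let the static REST mapper resolve the kind to a resource.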
func newFakeDisruptionController() (*disruptionController, *pdbStates) {
@@ -97,6 +112,10 @@ func newFakeDisruptionController() (*disruptionController, *pdbStates) {
informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc())
+scheme := runtime.NewScheme()
+scheme.AddKnownTypeWithName(customGVK, &v1.Service{})
+fakeScaleClient := &scalefake.FakeScaleClient{}
dc := NewDisruptionController(
informerFactory.Core().V1().Pods(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
@@ -105,6 +124,8 @@ func newFakeDisruptionController() (*disruptionController, *pdbStates) {
informerFactory.Apps().V1().Deployments(),
informerFactory.Apps().V1().StatefulSets(),
nil,
+testrestmapper.TestOnlyStaticRESTMapper(scheme),
+fakeScaleClient,
)
dc.getUpdater = func() updater { return ps.Set }
dc.podListerSynced = alwaysReady
@@ -122,6 +143,7 @@ func newFakeDisruptionController() (*disruptionController, *pdbStates) {
informerFactory.Apps().V1().ReplicaSets().Informer().GetStore(),
informerFactory.Apps().V1().Deployments().Informer().GetStore(),
informerFactory.Apps().V1().StatefulSets().Informer().GetStore(),
+fakeScaleClient,
}, ps
}
@@ -490,6 +512,52 @@ func TestReplicaSet(t *testing.T) {
ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]metav1.Time{})
}
+func TestScaleResource(t *testing.T) {
+customResourceUID := uuid.NewUUID()
+replicas := int32(10)
+pods := int32(4)
+maxUnavailable := int32(5)
+dc, ps := newFakeDisruptionController()
+dc.scaleClient.AddReactor("get", "customresources", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+obj := &autoscalingapi.Scale{
+ObjectMeta: metav1.ObjectMeta{
+Namespace: metav1.NamespaceDefault,
+UID: customResourceUID,
+},
+Spec: autoscalingapi.ScaleSpec{
+Replicas: replicas,
+},
+}
+return true, obj, nil
+})
+pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(int(maxUnavailable)))
+add(t, dc.pdbStore, pdb)
+trueVal := true
+for i := 0; i < int(pods); i++ {
+pod, _ := newPod(t, fmt.Sprintf("pod-%d", i))
+pod.SetOwnerReferences([]metav1.OwnerReference{
+{
+Kind: customGVK.Kind,
+APIVersion: customGVK.GroupVersion().String(),
+Controller: &trueVal,
+UID: customResourceUID,
+},
+})
+add(t, dc.podStore, pod)
+}
+dc.sync(pdbName)
+disruptionsAllowed := int32(0)
+if replicas-pods < maxUnavailable {
+disruptionsAllowed = maxUnavailable - (replicas - pods)
+}
+ps.VerifyPdbStatus(t, pdbName, disruptionsAllowed, pods, replicas-maxUnavailable, replicas, map[string]metav1.Time{})
+}
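A worked check of the expected status with the values above: desiredHealthy = replicas - maxUnavailable = 10 - 5 = 5 while currentHealthy = pods = 4, so no disruptions can be allowed; the guard replicas-pods < maxUnavailable (6 < 5) is false for the same reason, leaving disruptionsAllowed at 0, which is exactly what VerifyPdbStatus asserts as (0, 4, 5, 10).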
// Verify that multiple controllers don't allow the PDB to be set true.
func TestMultipleControllers(t *testing.T) {
const podCount = 2
@@ -759,3 +827,202 @@ func TestUpdateDisruptedPods(t *testing.T) {
ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{"p3": {Time: currentTime}})
}
+func TestBasicFinderFunctions(t *testing.T) {
+dc, _ := newFakeDisruptionController()
+rs, _ := newReplicaSet(t, 10)
+add(t, dc.rsStore, rs)
+rc, _ := newReplicationController(t, 12)
+add(t, dc.rcStore, rc)
+ss, _ := newStatefulSet(t, 14)
+add(t, dc.ssStore, ss)
+testCases := map[string]struct {
+finderFunc podControllerFinder
+apiVersion string
+kind string
+name string
+uid types.UID
+findsScale bool
+expectedScale int32
+}{
+"replicaset controller with apps group": {
+finderFunc: dc.getPodReplicaSet,
+apiVersion: "apps/v1",
+kind: controllerKindRS.Kind,
+name: rs.Name,
+uid: rs.UID,
+findsScale: true,
+expectedScale: 10,
+},
+"replicaset controller with invalid group": {
+finderFunc: dc.getPodReplicaSet,
+apiVersion: "invalid/v1",
+kind: controllerKindRS.Kind,
+name: rs.Name,
+uid: rs.UID,
+findsScale: false,
+},
+"replicationcontroller with empty group": {
+finderFunc: dc.getPodReplicationController,
+apiVersion: "/v1",
+kind: controllerKindRC.Kind,
+name: rc.Name,
+uid: rc.UID,
+findsScale: true,
+expectedScale: 12,
+},
+"replicationcontroller with invalid group": {
+finderFunc: dc.getPodReplicationController,
+apiVersion: "apps/v1",
+kind: controllerKindRC.Kind,
+name: rc.Name,
+uid: rc.UID,
+findsScale: false,
+},
"statefulset controller with extensions group": {
+finderFunc: dc.getPodStatefulSet,
+apiVersion: "apps/v1",
+kind: controllerKindSS.Kind,
+name: ss.Name,
+uid: ss.UID,
+findsScale: true,
+expectedScale: 14,
+},
+"statefulset controller with invalid kind": {
+finderFunc: dc.getPodStatefulSet,
+apiVersion: "apps/v1",
+kind: controllerKindRS.Kind,
+name: ss.Name,
+uid: ss.UID,
+findsScale: false,
+},
+}
+for tn, tc := range testCases {
+t.Run(tn, func(t *testing.T) {
+controllerRef := &metav1.OwnerReference{
+APIVersion: tc.apiVersion,
+Kind: tc.kind,
+Name: tc.name,
+UID: tc.uid,
+}
+controllerAndScale, _ := tc.finderFunc(controllerRef, metav1.NamespaceDefault)
+if controllerAndScale == nil {
+if tc.findsScale {
+t.Error("Expected scale, but got nil")
+}
+return
+}
+if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
+t.Errorf("Expected scale %d, but got %d", want, got)
+}
+if got, want := controllerAndScale.UID, tc.uid; got != want {
+t.Errorf("Expected uid %s, but got %s", want, got)
+}
+})
+}
+}
+func TestDeploymentFinderFunction(t *testing.T) {
+labels := map[string]string{
+"foo": "bar",
+}
+testCases := map[string]struct {
+rsApiVersion string
+rsKind string
+depApiVersion string
+depKind string
+findsScale bool
+expectedScale int32
+}{
+"happy path": {
+rsApiVersion: "apps/v1",
+rsKind: controllerKindRS.Kind,
+depApiVersion: "extensions/v1",
+depKind: controllerKindDep.Kind,
+findsScale: true,
+expectedScale: 10,
+},
+"invalid rs apiVersion": {
+rsApiVersion: "invalid/v1",
+rsKind: controllerKindRS.Kind,
+depApiVersion: "apps/v1",
+depKind: controllerKindDep.Kind,
+findsScale: false,
+},
+"invalid rs kind": {
+rsApiVersion: "apps/v1",
+rsKind: "InvalidKind",
+depApiVersion: "apps/v1",
+depKind: controllerKindDep.Kind,
+findsScale: false,
+},
+"invalid deployment apiVersion": {
+rsApiVersion: "extensions/v1",
+rsKind: controllerKindRS.Kind,
+depApiVersion: "deployment/v1",
+depKind: controllerKindDep.Kind,
+findsScale: false,
+},
+"invalid deployment kind": {
+rsApiVersion: "apps/v1",
+rsKind: controllerKindRS.Kind,
+depApiVersion: "extensions/v1",
+depKind: "InvalidKind",
+findsScale: false,
+},
+}
+for tn, tc := range testCases {
+t.Run(tn, func(t *testing.T) {
+dc, _ := newFakeDisruptionController()
+dep, _ := newDeployment(t, 10)
+dep.Spec.Selector = newSel(labels)
+add(t, dc.dStore, dep)
+rs, _ := newReplicaSet(t, 5)
+rs.Labels = labels
+trueVal := true
+rs.OwnerReferences = append(rs.OwnerReferences, metav1.OwnerReference{
+APIVersion: tc.depApiVersion,
+Kind: tc.depKind,
+Name: dep.Name,
+UID: dep.UID,
+Controller: &trueVal,
+})
+add(t, dc.rsStore, rs)
+controllerRef := &metav1.OwnerReference{
+APIVersion: tc.rsApiVersion,
+Kind: tc.rsKind,
+Name: rs.Name,
+UID: rs.UID,
+}
+controllerAndScale, _ := dc.getPodDeployment(controllerRef, metav1.NamespaceDefault)
+if controllerAndScale == nil {
+if tc.findsScale {
+t.Error("Expected scale, but got nil")
+}
+return
+}
+if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
+t.Errorf("Expected scale %d, but got %d", want, got)
+}
+if got, want := controllerAndScale.UID, dep.UID; got != want {
+t.Errorf("Expected uid %s, but got %s", want, got)
+}
+})
+}
+}