
This makes the Kubelet config key in the ConfigMap an explicit part of the API, so we can stop using magic key names. As part of this change, we are retiring ConfigMapRef in favor of ConfigMap.
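For reference, this is the shape the change introduces, lifted from the setNode2ConfigSource helper in the test below (the field values are the test's own fixtures, not a recommendation): the ConfigMap source now names the ConfigMap directly and carries an explicit KubeletConfigKey instead of a well-known key name.

	node2.Spec.ConfigSource = &api.NodeConfigSource{
		ConfigMap: &api.ConfigMapNodeConfigSource{
			Namespace:        "ns",
			Name:             "myconfigmapconfigsource",
			UID:              "uid",     // bogus in the test; validation only requires non-empty
			KubeletConfigKey: "kubelet", // explicit key, replacing the old magic key name
		},
	}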
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
	"k8s.io/apiserver/pkg/authentication/token/tokenfile"
	"k8s.io/apiserver/pkg/authentication/user"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
	versionedinformers "k8s.io/client-go/informers"
	externalclientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/policy"
	"k8s.io/kubernetes/pkg/auth/nodeidentifier"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/kubeapiserver/authorizer"
	"k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
	"k8s.io/kubernetes/test/integration/framework"
)

func TestNodeAuthorizer(t *testing.T) {
	// Start the server so we know the address
	h := &framework.MasterHolder{Initialized: make(chan struct{})}
	apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		<-h.Initialized
		h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
	}))

	const (
		// Define credentials
		tokenMaster      = "master-token"
		tokenNodeUnknown = "unknown-token"
		tokenNode1       = "node1-token"
		tokenNode2       = "node2-token"
	)

	authenticator := bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
		tokenMaster:      {Name: "admin", Groups: []string{"system:masters"}},
		tokenNodeUnknown: {Name: "unknown", Groups: []string{"system:nodes"}},
		tokenNode1:       {Name: "system:node:node1", Groups: []string{"system:nodes"}},
		tokenNode2:       {Name: "system:node:node2", Groups: []string{"system:nodes"}},
	}))

	// Build client config, clientset, and informers
	clientConfig := &restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
	superuserClient, superuserClientExternal := clientsetForToken(tokenMaster, clientConfig)
	informerFactory := informers.NewSharedInformerFactory(superuserClient, time.Minute)
	versionedInformerFactory := versionedinformers.NewSharedInformerFactory(superuserClientExternal, time.Minute)

	// Enable the CSIPersistentVolume feature at startup so volumeattachments get watched
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, true)()

	// Enable the DynamicKubeletConfig feature so that Node.Spec.ConfigSource can be set
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicKubeletConfig, true)()

	// Set up the Node+RBAC authorizer
	authorizerConfig := &authorizer.AuthorizationConfig{
		AuthorizationModes:       []string{"Node", "RBAC"},
		InformerFactory:          informerFactory,
		VersionedInformerFactory: versionedInformerFactory,
	}
	nodeRBACAuthorizer, _, err := authorizerConfig.New()
	if err != nil {
		t.Fatal(err)
	}

	// Set up the NodeRestriction admission plugin
	nodeRestrictionAdmission := noderestriction.NewPlugin(nodeidentifier.NewDefaultNodeIdentifier())
	nodeRestrictionAdmission.SetInternalKubeClientSet(superuserClient)
	if err := nodeRestrictionAdmission.ValidateInitialization(); err != nil {
		t.Fatal(err)
	}

	// Start the server
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.Authentication.Authenticator = authenticator
	masterConfig.GenericConfig.Authorization.Authorizer = nodeRBACAuthorizer
	masterConfig.GenericConfig.AdmissionControl = nodeRestrictionAdmission

	_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, apiServer, h)
	defer closeFn()

	// Start the informers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informerFactory.Start(stopCh)
	versionedInformerFactory.Start(stopCh)

	// Wait for a healthy server
	for {
		result := superuserClient.Core().RESTClient().Get().AbsPath("/healthz").Do()
		_, err := result.Raw()
		if err == nil {
			break
		}
		t.Log(err)
		time.Sleep(time.Second)
	}

	// Create objects
	if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}); err != nil {
		t.Fatal(err)
	}
	if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mypvsecret"}}); err != nil {
		t.Fatal(err)
	}
	if _, err := superuserClient.Core().ConfigMaps("ns").Create(&api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil {
		t.Fatal(err)
	}
	if _, err := superuserClient.Core().ConfigMaps("ns").Create(&api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmapconfigsource"}}); err != nil {
		t.Fatal(err)
	}
	pvName := "mypv"
	if _, err := superuserClientExternal.StorageV1beta1().VolumeAttachments().Create(&storagev1beta1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "myattachment"},
		Spec: storagev1beta1.VolumeAttachmentSpec{
			Attacher: "foo",
			Source:   storagev1beta1.VolumeAttachmentSource{PersistentVolumeName: &pvName},
			NodeName: "node2",
		},
	}); err != nil {
		t.Fatal(err)
	}
	if _, err := superuserClient.Core().PersistentVolumeClaims("ns").Create(&api.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "mypvc"},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany},
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceStorage: resource.MustParse("1")}},
		},
	}); err != nil {
		t.Fatal(err)
	}

	if _, err := superuserClient.Core().PersistentVolumes().Create(&api.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "mypv"},
		Spec: api.PersistentVolumeSpec{
			AccessModes:            []api.PersistentVolumeAccessMode{api.ReadOnlyMany},
			Capacity:               api.ResourceList{api.ResourceStorage: resource.MustParse("1")},
			ClaimRef:               &api.ObjectReference{Namespace: "ns", Name: "mypvc"},
			PersistentVolumeSource: api.PersistentVolumeSource{AzureFile: &api.AzureFilePersistentVolumeSource{ShareName: "default", SecretName: "mypvsecret"}},
		},
	}); err != nil {
		t.Fatal(err)
	}

	getSecret := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{})
			return err
		}
	}
	getPVSecret := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{})
			return err
		}
	}
	getConfigMap := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{})
			return err
		}
	}
	getConfigMapConfigSource := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().ConfigMaps("ns").Get("myconfigmapconfigsource", metav1.GetOptions{})
			return err
		}
	}
	getPVC := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{})
			return err
		}
	}
	getPV := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{})
			return err
		}
	}
	getVolumeAttachment := func(client externalclientset.Interface) func() error {
		return func() error {
			_, err := client.StorageV1beta1().VolumeAttachments().Get("myattachment", metav1.GetOptions{})
			return err
		}
	}

	createNode2NormalPod := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().Pods("ns").Create(&api.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
				Spec: api.PodSpec{
					NodeName:   "node2",
					Containers: []api.Container{{Name: "image", Image: "busybox"}},
					Volumes: []api.Volume{
						{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}},
						{Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}},
						{Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}},
					},
				},
			})
			return err
		}
	}
	updateNode2NormalPodStatus := func(client clientset.Interface) func() error {
		return func() error {
			startTime := metav1.NewTime(time.Now())
			_, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
				Status:     api.PodStatus{StartTime: &startTime},
			})
			return err
		}
	}
	deleteNode2NormalPod := func(client clientset.Interface) func() error {
		return func() error {
			zero := int64(0)
			return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
		}
	}

	createNode2MirrorPod := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().Pods("ns").Create(&api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:        "node2mirrorpod",
					Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"},
				},
				Spec: api.PodSpec{
					NodeName:   "node2",
					Containers: []api.Container{{Name: "image", Image: "busybox"}},
				},
			})
			return err
		}
	}
	deleteNode2MirrorPod := func(client clientset.Interface) func() error {
		return func() error {
			zero := int64(0)
			return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
		}
	}

	createNode2 := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}})
			return err
		}
	}
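	// setNode2ConfigSource assigns a ConfigMap-based NodeConfigSource (with an explicit
	// KubeletConfigKey) to node2; unsetNode2ConfigSource below clears it again.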
	setNode2ConfigSource := func(client clientset.Interface) func() error {
		return func() error {
			node2, err := client.Core().Nodes().Get("node2", metav1.GetOptions{})
			if err != nil {
				return err
			}
			node2.Spec.ConfigSource = &api.NodeConfigSource{
				ConfigMap: &api.ConfigMapNodeConfigSource{
					Namespace: "ns",
					Name:      "myconfigmapconfigsource",
					// validation just requires UID to be non-empty and it isn't necessary for GET,
					// so we just use a bogus one for the test
					UID:              "uid",
					KubeletConfigKey: "kubelet",
				},
			}
			_, err = client.Core().Nodes().Update(node2)
			return err
		}
	}
	unsetNode2ConfigSource := func(client clientset.Interface) func() error {
		return func() error {
			node2, err := client.Core().Nodes().Get("node2", metav1.GetOptions{})
			if err != nil {
				return err
			}
			node2.Spec.ConfigSource = nil
			_, err = client.Core().Nodes().Update(node2)
			return err
		}
	}
	updateNode2Status := func(client clientset.Interface) func() error {
		return func() error {
			_, err := client.Core().Nodes().UpdateStatus(&api.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "node2"},
				Status:     api.NodeStatus{},
			})
			return err
		}
	}
	deleteNode2 := func(client clientset.Interface) func() error {
		return func() error {
			return client.Core().Nodes().Delete("node2", nil)
		}
	}
	createNode2NormalPodEviction := func(client clientset.Interface) func() error {
		return func() error {
			return client.Policy().Evictions("ns").Evict(&policy.Eviction{
				TypeMeta: metav1.TypeMeta{
					APIVersion: "policy/v1beta1",
					Kind:       "Eviction",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      "node2normalpod",
					Namespace: "ns",
				},
			})
		}
	}
	createNode2MirrorPodEviction := func(client clientset.Interface) func() error {
		return func() error {
			return client.Policy().Evictions("ns").Evict(&policy.Eviction{
				TypeMeta: metav1.TypeMeta{
					APIVersion: "policy/v1beta1",
					Kind:       "Eviction",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      "node2mirrorpod",
					Namespace: "ns",
				},
			})
		}
	}

	capacity := 50
	updatePVCCapacity := func(client clientset.Interface) func() error {
		return func() error {
			capacity++
			statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity)
			patchBytes := []byte(statusString)
			_, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status")
			return err
		}
	}

	updatePVCPhase := func(client clientset.Interface) func() error {
		return func() error {
			patchBytes := []byte(`{"status":{"phase": "Bound"}}`)
			_, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status")
			return err
		}
	}

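	// Build clients authenticated as an unrecognized node identity, node1, and node2.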
	nodeanonClient, _ := clientsetForToken(tokenNodeUnknown, clientConfig)
	node1Client, node1ClientExternal := clientsetForToken(tokenNode1, clientConfig)
	node2Client, node2ClientExternal := clientsetForToken(tokenNode2, clientConfig)

	// all node requests from node1 and unknown node fail
	expectForbidden(t, getSecret(nodeanonClient))
	expectForbidden(t, getPVSecret(nodeanonClient))
	expectForbidden(t, getConfigMap(nodeanonClient))
	expectForbidden(t, getPVC(nodeanonClient))
	expectForbidden(t, getPV(nodeanonClient))
	expectForbidden(t, createNode2NormalPod(nodeanonClient))
	expectForbidden(t, createNode2MirrorPod(nodeanonClient))
	expectForbidden(t, deleteNode2NormalPod(nodeanonClient))
	expectForbidden(t, deleteNode2MirrorPod(nodeanonClient))
	expectForbidden(t, createNode2MirrorPodEviction(nodeanonClient))
	expectForbidden(t, createNode2(nodeanonClient))
	expectForbidden(t, updateNode2Status(nodeanonClient))
	expectForbidden(t, deleteNode2(nodeanonClient))

	expectForbidden(t, getSecret(node1Client))
	expectForbidden(t, getPVSecret(node1Client))
	expectForbidden(t, getConfigMap(node1Client))
	expectForbidden(t, getPVC(node1Client))
	expectForbidden(t, getPV(node1Client))
	expectForbidden(t, createNode2NormalPod(nodeanonClient))
	expectForbidden(t, createNode2MirrorPod(node1Client))
	expectNotFound(t, deleteNode2MirrorPod(node1Client))
	expectNotFound(t, createNode2MirrorPodEviction(node1Client))
	expectForbidden(t, createNode2(node1Client))
	expectForbidden(t, updateNode2Status(node1Client))
	expectForbidden(t, deleteNode2(node1Client))

	// related object requests from node2 fail
	expectForbidden(t, getSecret(node2Client))
	expectForbidden(t, getPVSecret(node2Client))
	expectForbidden(t, getConfigMap(node2Client))
	expectForbidden(t, getPVC(node2Client))
	expectForbidden(t, getPV(node2Client))

	expectForbidden(t, createNode2NormalPod(nodeanonClient))
	// mirror pod and self node lifecycle is allowed
	expectAllowed(t, createNode2MirrorPod(node2Client))
	expectAllowed(t, deleteNode2MirrorPod(node2Client))
	expectAllowed(t, createNode2MirrorPod(node2Client))
	expectAllowed(t, createNode2MirrorPodEviction(node2Client))
	expectAllowed(t, createNode2(node2Client))
	expectAllowed(t, updateNode2Status(node2Client))
	// cleanup node
	expectAllowed(t, deleteNode2(superuserClient))

	// create a pod as an admin to add object references
	expectAllowed(t, createNode2NormalPod(superuserClient))

	// unidentifiable node and node1 are still forbidden
	expectForbidden(t, getSecret(nodeanonClient))
	expectForbidden(t, getPVSecret(nodeanonClient))
	expectForbidden(t, getConfigMap(nodeanonClient))
	expectForbidden(t, getPVC(nodeanonClient))
	expectForbidden(t, getPV(nodeanonClient))
	expectForbidden(t, createNode2NormalPod(nodeanonClient))
	expectForbidden(t, updateNode2NormalPodStatus(nodeanonClient))
	expectForbidden(t, deleteNode2NormalPod(nodeanonClient))
	expectForbidden(t, createNode2NormalPodEviction(nodeanonClient))
	expectForbidden(t, createNode2MirrorPod(nodeanonClient))
	expectForbidden(t, deleteNode2MirrorPod(nodeanonClient))
	expectForbidden(t, createNode2MirrorPodEviction(nodeanonClient))

	expectForbidden(t, getSecret(node1Client))
	expectForbidden(t, getPVSecret(node1Client))
	expectForbidden(t, getConfigMap(node1Client))
	expectForbidden(t, getPVC(node1Client))
	expectForbidden(t, getPV(node1Client))
	expectForbidden(t, createNode2NormalPod(node1Client))
	expectForbidden(t, updateNode2NormalPodStatus(node1Client))
	expectForbidden(t, deleteNode2NormalPod(node1Client))
	expectForbidden(t, createNode2NormalPodEviction(node1Client))
	expectForbidden(t, createNode2MirrorPod(node1Client))
	expectNotFound(t, deleteNode2MirrorPod(node1Client))
	expectNotFound(t, createNode2MirrorPodEviction(node1Client))

	// node2 can get referenced objects now
	expectAllowed(t, getSecret(node2Client))
	expectAllowed(t, getPVSecret(node2Client))
	expectAllowed(t, getConfigMap(node2Client))
	expectAllowed(t, getPVC(node2Client))
	expectAllowed(t, getPV(node2Client))

	expectForbidden(t, createNode2NormalPod(node2Client))
	expectAllowed(t, updateNode2NormalPodStatus(node2Client))
	expectAllowed(t, deleteNode2NormalPod(node2Client))
	expectAllowed(t, createNode2MirrorPod(node2Client))
	expectAllowed(t, deleteNode2MirrorPod(node2Client))

	// recreate as an admin to test eviction
	expectAllowed(t, createNode2NormalPod(superuserClient))
	expectAllowed(t, createNode2MirrorPod(superuserClient))
	expectAllowed(t, createNode2NormalPodEviction(node2Client))
	expectAllowed(t, createNode2MirrorPodEviction(node2Client))

	// re-create a pod as an admin to add object references
	expectAllowed(t, createNode2NormalPod(superuserClient))

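	// With the ExpandPersistentVolumes gate off, nodes may not patch PVC status at all;
	// with it on, only node2 (whose pod references mypvc) may update capacity, and
	// phase updates remain forbidden.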
	// ExpandPersistentVolumes feature disabled
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandPersistentVolumes, false)()
	expectForbidden(t, updatePVCCapacity(node1Client))
	expectForbidden(t, updatePVCCapacity(node2Client))

	// ExpandPersistentVolumes feature enabled
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandPersistentVolumes, true)()
	expectForbidden(t, updatePVCCapacity(node1Client))
	expectAllowed(t, updatePVCCapacity(node2Client))
	expectForbidden(t, updatePVCPhase(node2Client))

	// CSIPersistentVolume feature disabled
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, false)()
	expectForbidden(t, getVolumeAttachment(node1ClientExternal))
	expectForbidden(t, getVolumeAttachment(node2ClientExternal))
	// CSIPersistentVolume feature enabled
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, true)()
	expectForbidden(t, getVolumeAttachment(node1ClientExternal))
	expectAllowed(t, getVolumeAttachment(node2ClientExternal))

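	// Dynamic Kubelet config: a node may not assign its own config source, and it can
	// read the referenced ConfigMap only while that ConfigMap is assigned as its source.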
	// create node2 again
	expectAllowed(t, createNode2(node2Client))
	// node2 cannot set its own config source
	expectForbidden(t, setNode2ConfigSource(node2Client))
	// node2 cannot access the configmap config source yet
	expectForbidden(t, getConfigMapConfigSource(node2Client))
	// superuser can access the configmap config source
	expectAllowed(t, getConfigMapConfigSource(superuserClient))
	// superuser can set node2's config source
	expectAllowed(t, setNode2ConfigSource(superuserClient))
	// node2 can now get the configmap assigned as its config source
	expectAllowed(t, getConfigMapConfigSource(node2Client))
	// superuser can unset node2's config source
	expectAllowed(t, unsetNode2ConfigSource(superuserClient))
	// node2 can no longer get the configmap after it is unassigned as its config source
	expectForbidden(t, getConfigMapConfigSource(node2Client))
	// node should not be able to delete itself
	expectForbidden(t, deleteNode2(node2Client))
	// clean up node2
	expectAllowed(t, deleteNode2(superuserClient))

	//TODO(mikedanese): integration test node restriction of TokenRequest
}

// expect executes a function a set number of times until it either returns the
// expected error or executes too many times. It returns whether the retries timed
// out and the last error returned by the method.
func expect(t *testing.T, f func() error, wantErr func(error) bool) (timeout bool, lastErr error) {
	t.Helper()
	err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		t.Helper()
		lastErr = f()
		if wantErr(lastErr) {
			return true, nil
		}
		t.Logf("unexpected response, will retry: %v", lastErr)
		return false, nil
	})
	return err == nil, lastErr
}

func expectForbidden(t *testing.T, f func() error) {
	t.Helper()
	if ok, err := expect(t, f, errors.IsForbidden); !ok {
		t.Errorf("Expected forbidden error, got %v", err)
	}
}

func expectNotFound(t *testing.T, f func() error) {
	t.Helper()
	if ok, err := expect(t, f, errors.IsNotFound); !ok {
		t.Errorf("Expected notfound error, got %v", err)
	}
}

func expectAllowed(t *testing.T, f func() error) {
	t.Helper()
	if ok, err := expect(t, f, func(e error) bool { return e == nil }); !ok {
		t.Errorf("Expected no error, got %v", err)
	}
}