Merge pull request #109798 from liggitt/psp
Remove PodSecurityPolicy admission plugin
@@ -420,17 +420,74 @@ run_deprecated_api_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing deprecated APIs"

  # Create deprecated CRD
  kubectl "${kube_flags_with_token[@]:?}" create -f - << __EOF__
{
  "kind": "CustomResourceDefinition",
  "apiVersion": "apiextensions.k8s.io/v1",
  "metadata": {
    "name": "deprecated.example.com"
  },
  "spec": {
    "group": "example.com",
    "scope": "Namespaced",
    "names": {
      "plural": "deprecated",
      "kind": "DeprecatedKind"
    },
    "versions": [
      {
        "name": "v1",
        "served": true,
        "storage": true,
        "schema": {
          "openAPIV3Schema": {
            "x-kubernetes-preserve-unknown-fields": true,
            "type": "object"
          }
        }
      },
      {
        "name": "v1beta1",
        "deprecated": true,
        "served": true,
        "storage": false,
        "schema": {
          "openAPIV3Schema": {
            "x-kubernetes-preserve-unknown-fields": true,
            "type": "object"
          }
        }
      }
    ]
  }
}
__EOF__

  # Ensure the API server has recognized and started serving the associated CR API
  local tries=5
  for i in $(seq 1 $tries); do
    local output
    output=$(kubectl "${kube_flags[@]:?}" api-resources --api-group example.com -oname)
    if kube::test::if_has_string "$output" deprecated.example.com; then
      break
    fi
    echo "${i}: Waiting for CR API to be available"
    sleep "$i"
  done

  # Test deprecated API request output
  # TODO(liggitt): switch this to a custom deprecated resource once CRDs support marking versions as deprecated
  output_message=$(kubectl get podsecuritypolicies.v1beta1.policy 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'PodSecurityPolicy is deprecated'
  output_message=$(! kubectl get podsecuritypolicies.v1beta1.policy --warnings-as-errors 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'PodSecurityPolicy is deprecated'
  output_message=$(kubectl get deprecated.v1beta1.example.com 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'example.com/v1beta1 DeprecatedKind is deprecated'
  output_message=$(! kubectl get deprecated.v1beta1.example.com --warnings-as-errors 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'example.com/v1beta1 DeprecatedKind is deprecated'
  kube::test::if_has_string "${output_message}" 'error: 1 warning received'

  # Delete deprecated CRD
  kubectl delete "${kube_flags[@]}" crd deprecated.example.com

  set +o nounset
  set +o errexit
}
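
The warning strings asserted above come from the API server's standard deprecation-warning mechanism (Warning: response headers, added in Kubernetes 1.19), which kubectl relays to stderr. A minimal sketch of capturing the same warnings from a Go client via client-go's warning handler; the kubeconfig handling shown is illustrative:

	// Route server-sent warnings (including deprecation warnings like the
	// ones asserted above) to stderr, deduplicated per process.
	package main

	import (
		"os"

		clientset "k8s.io/client-go/kubernetes"
		"k8s.io/client-go/rest"
		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		rest.SetDefaultWarningHandler(rest.NewWarningWriter(os.Stderr, rest.WarningWriterOptions{
			Deduplicate: true,
		}))

		config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
		if err != nil {
			panic(err)
		}
		// Any client built from this config now surfaces API warnings.
		_, _ = clientset.NewForConfig(config)
	}
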
@@ -90,7 +90,6 @@ nodes="nodes"
persistentvolumeclaims="persistentvolumeclaims"
persistentvolumes="persistentvolumes"
pods="pods"
podsecuritypolicies="podsecuritypolicies"
podtemplates="podtemplates"
replicasets="replicasets"
replicationcontrollers="replicationcontrollers"

@@ -934,7 +933,7 @@ runTests() {
  # Kubectl deprecated APIs #
  ############################

  if kube::test::if_supports_resource "${podsecuritypolicies}" ; then
  if kube::test::if_supports_resource "${customresourcedefinitions}" ; then
    record_command run_deprecated_api_tests
  fi

@@ -1,367 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
	psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
	utilpointer "k8s.io/utils/pointer"

	"github.com/onsi/ginkgo"
)

const nobodyUser = int64(65534)

var _ = SIGDescribe("PodSecurityPolicy [Feature:PodSecurityPolicy]", func() {
	f := framework.NewDefaultFramework("podsecuritypolicy")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
	f.SkipPrivilegedPSPBinding = true

	// Client that will impersonate the default service account, in order to run
	// with reduced privileges.
	var c clientset.Interface
	var ns string // Test namespace, for convenience
	ginkgo.BeforeEach(func() {
		if !framework.IsPodSecurityPolicyEnabled(f.ClientSet) {
			framework.Failf("PodSecurityPolicy not enabled")
			return
		}
		if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
			e2eskipper.Skipf("RBAC not enabled")
		}
		ns = f.Namespace.Name

		ginkgo.By("Creating a kubernetes client that impersonates the default service account")
		config, err := framework.LoadConfig()
		framework.ExpectNoError(err)
		config.Impersonate = restclient.ImpersonationConfig{
			UserName: serviceaccount.MakeUsername(ns, "default"),
			Groups:   serviceaccount.MakeGroupNames(ns),
		}
		c, err = clientset.NewForConfig(config)
		framework.ExpectNoError(err)

		ginkgo.By("Binding the edit role to the default SA")
		err = e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "edit", ns,
			rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns, Name: "default"})
		framework.ExpectNoError(err)
	})

	ginkgo.It("should forbid pod creation when no PSP is available", func() {
		ginkgo.By("Running a restricted pod")
		_, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("restricted"), metav1.CreateOptions{})
		expectForbidden(err)
	})

	ginkgo.It("should enforce the restricted policy.PodSecurityPolicy", func() {
		ginkgo.By("Creating & Binding a restricted policy for the test service account")
		_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
		defer cleanup()

		ginkgo.By("Running a restricted pod")
		pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("allowed"), metav1.CreateOptions{})
		framework.ExpectNoError(err)
		framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))

		testPrivilegedPods(func(pod *v1.Pod) {
			_, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
			expectForbidden(err)
		})
	})

	ginkgo.It("should allow pods under the privileged policy.PodSecurityPolicy", func() {
		ginkgo.By("Creating & Binding a privileged policy for the test service account")
		// Ensure that the permissive policy is used even in the presence of the restricted policy.
		_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
		defer cleanup()
		expectedPSP, cleanup := createAndBindPSP(f, privilegedPSP("permissive"))
		defer cleanup()

		testPrivilegedPods(func(pod *v1.Pod) {
			p, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))

			// Verify expected PSP was used.
			p, err = c.CoreV1().Pods(ns).Get(context.TODO(), p.Name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
			framework.ExpectEqual(found, true, "PSP annotation not found")
			framework.ExpectEqual(validated, expectedPSP.Name, "Unexpected validated PSP")
		})
	})
})

func expectForbidden(err error) {
	framework.ExpectError(err, "should be forbidden")
	framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error")
}

func testPrivilegedPods(tester func(pod *v1.Pod)) {
	ginkgo.By("Running a privileged pod", func() {
		privileged := restrictedPod("privileged")
		privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
		privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
		tester(privileged)
	})

	ginkgo.By("Running a HostPath pod", func() {
		hostpath := restrictedPod("hostpath")
		hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
			Name:      "hp",
			MountPath: "/hp",
		}}
		hostpath.Spec.Volumes = []v1.Volume{{
			Name: "hp",
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
			},
		}}
		tester(hostpath)
	})

	ginkgo.By("Running a HostNetwork pod", func() {
		hostnet := restrictedPod("hostnet")
		hostnet.Spec.HostNetwork = true
		tester(hostnet)
	})

	ginkgo.By("Running a HostPID pod", func() {
		hostpid := restrictedPod("hostpid")
		hostpid.Spec.HostPID = true
		tester(hostpid)
	})

	ginkgo.By("Running a HostIPC pod", func() {
		hostipc := restrictedPod("hostipc")
		hostipc.Spec.HostIPC = true
		tester(hostipc)
	})

	ginkgo.By("Running an unconfined Seccomp pod", func() {
		unconfined := restrictedPod("seccomp")
		unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
		tester(unconfined)
	})

	ginkgo.By("Running a SYS_ADMIN pod", func() {
		sysadmin := restrictedPod("sysadmin")
		sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
			Add: []v1.Capability{"SYS_ADMIN"},
		}
		sysadmin.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
		tester(sysadmin)
	})

	ginkgo.By("Running a RunAsGroup pod", func() {
		sysadmin := restrictedPod("runasgroup")
		gid := int64(0)
		sysadmin.Spec.Containers[0].SecurityContext.RunAsGroup = &gid
		tester(sysadmin)
	})

	ginkgo.By("Running a RunAsUser pod", func() {
		sysadmin := restrictedPod("runasuser")
		uid := int64(0)
		sysadmin.Spec.Containers[0].SecurityContext.RunAsUser = &uid
		tester(sysadmin)
	})
}

// createAndBindPSP creates a PSP in the policy API group.
func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecurityPolicy) (psp *policyv1beta1.PodSecurityPolicy, cleanup func()) {
	// Create the PodSecurityPolicy object.
	psp = pspTemplate.DeepCopy()
	// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
	ns := f.Namespace.Name
	name := fmt.Sprintf("%s-%s", ns, psp.Name)
	psp.Name = name
	psp, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create PSP")

	// Create the Role to bind it to the namespace.
	_, err = f.ClientSet.RbacV1().Roles(ns).Create(context.TODO(), &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Rules: []rbacv1.PolicyRule{{
			APIGroups:     []string{"policy"},
			Resources:     []string{"podsecuritypolicies"},
			ResourceNames: []string{name},
			Verbs:         []string{"use"},
		}},
	}, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create PSP role")

	// Bind the role to the namespace.
	err = e2eauth.BindRoleInNamespace(f.ClientSet.RbacV1(), name, ns, rbacv1.Subject{
		Kind:      rbacv1.ServiceAccountKind,
		Namespace: ns,
		Name:      "default",
	})
	framework.ExpectNoError(err)

	framework.ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
		serviceaccount.MakeUsername(ns, "default"), ns, "use", name,
		schema.GroupResource{Group: "policy", Resource: "podsecuritypolicies"}, true))

	return psp, func() {
		// Cleanup non-namespaced PSP object.
		f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Delete(context.TODO(), name, metav1.DeleteOptions{})
	}
}
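
// Note: WaitForNamedAuthorizationUpdate above polls until the RBAC grant is
// visible to the authorizer. A rough sketch of that kind of check, written
// directly against the authorization API (illustrative; not the helper's
// actual implementation):
//
//	func waitForUsePermission(c clientset.Interface, user, ns, pspName string) error {
//		return wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
//			sar := &authorizationv1.SubjectAccessReview{
//				Spec: authorizationv1.SubjectAccessReviewSpec{
//					User: user,
//					ResourceAttributes: &authorizationv1.ResourceAttributes{
//						Namespace: ns,
//						Verb:      "use",
//						Group:     "policy",
//						Resource:  "podsecuritypolicies",
//						Name:      pspName,
//					},
//				},
//			}
//			resp, err := c.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
//			if err != nil {
//				return false, err
//			}
//			return resp.Status.Allowed, nil
//		})
//	}
//
// (assumes authorizationv1 = k8s.io/api/authorization/v1 and
// wait = k8s.io/apimachinery/pkg/util/wait)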

func restrictedPod(name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Annotations: map[string]string{
				v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
				v1.AppArmorBetaContainerAnnotationKeyPrefix + "pause": v1.AppArmorBetaProfileRuntimeDefault,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "pause",
				Image: imageutils.GetPauseImageName(),
				SecurityContext: &v1.SecurityContext{
					AllowPrivilegeEscalation: boolPtr(false),
					RunAsUser:                utilpointer.Int64Ptr(nobodyUser),
					RunAsGroup:               utilpointer.Int64Ptr(nobodyUser),
				},
			}},
		},
	}
}
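
// Note: the seccomp and AppArmor annotations used in restrictedPod were the
// only pinning mechanism when this file was written. On releases where the
// first-class field is available (seccomp went GA in 1.19), the equivalent
// pod spec would use the SecurityContext field instead; a hedged sketch:
//
//	pod.Spec.SecurityContext = &v1.PodSecurityContext{
//		SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
//	}
//
// AppArmor remained annotation-driven until much later, so the AppArmor
// annotation above had no field equivalent at this point in time.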

// privilegedPSP creates a PodSecurityPolicy (in the "policy" API Group) that allows everything.
func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
	return &policyv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
		},
		Spec: policyv1beta1.PodSecurityPolicySpec{
			Privileged:               true,
			AllowPrivilegeEscalation: utilpointer.BoolPtr(true),
			AllowedCapabilities:      []v1.Capability{"*"},
			Volumes:                  []policyv1beta1.FSType{policyv1beta1.All},
			HostNetwork:              true,
			HostPorts:                []policyv1beta1.HostPortRange{{Min: 0, Max: 65535}},
			HostIPC:                  true,
			HostPID:                  true,
			RunAsUser: policyv1beta1.RunAsUserStrategyOptions{
				Rule: policyv1beta1.RunAsUserStrategyRunAsAny,
			},
			RunAsGroup: &policyv1beta1.RunAsGroupStrategyOptions{
				Rule: policyv1beta1.RunAsGroupStrategyRunAsAny,
			},
			SELinux: policyv1beta1.SELinuxStrategyOptions{
				Rule: policyv1beta1.SELinuxStrategyRunAsAny,
			},
			SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{
				Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny,
			},
			FSGroup: policyv1beta1.FSGroupStrategyOptions{
				Rule: policyv1beta1.FSGroupStrategyRunAsAny,
			},
			ReadOnlyRootFilesystem: false,
		},
	}
}

// restrictedPSP creates a PodSecurityPolicy (in the "policy" API Group) that is most strict.
func restrictedPSP(name string) *policyv1beta1.PodSecurityPolicy {
	return &policyv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Annotations: map[string]string{
				seccomp.AllowedProfilesAnnotationKey:        v1.SeccompProfileRuntimeDefault,
				seccomp.DefaultProfileAnnotationKey:         v1.SeccompProfileRuntimeDefault,
				v1.AppArmorBetaAllowedProfilesAnnotationKey: v1.AppArmorBetaProfileRuntimeDefault,
				v1.AppArmorBetaDefaultProfileAnnotationKey:  v1.AppArmorBetaProfileRuntimeDefault,
			},
		},
		Spec: policyv1beta1.PodSecurityPolicySpec{
			Privileged:               false,
			AllowPrivilegeEscalation: utilpointer.BoolPtr(false),
			RequiredDropCapabilities: []v1.Capability{
				"AUDIT_WRITE",
				"CHOWN",
				"DAC_OVERRIDE",
				"FOWNER",
				"FSETID",
				"KILL",
				"MKNOD",
				"NET_RAW",
				"SETGID",
				"SETUID",
				"SYS_CHROOT",
			},
			Volumes: []policyv1beta1.FSType{
				policyv1beta1.ConfigMap,
				policyv1beta1.EmptyDir,
				policyv1beta1.PersistentVolumeClaim,
				"projected",
				policyv1beta1.Secret,
			},
			HostNetwork: false,
			HostIPC:     false,
			HostPID:     false,
			RunAsUser: policyv1beta1.RunAsUserStrategyOptions{
				Rule: policyv1beta1.RunAsUserStrategyMustRunAsNonRoot,
			},
			RunAsGroup: &policyv1beta1.RunAsGroupStrategyOptions{
				Rule: policyv1beta1.RunAsGroupStrategyMustRunAs,
				Ranges: []policyv1beta1.IDRange{
					{Min: nobodyUser, Max: nobodyUser},
				},
			},
			SELinux: policyv1beta1.SELinuxStrategyOptions{
				Rule: policyv1beta1.SELinuxStrategyRunAsAny,
			},
			SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{
				Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny,
			},
			FSGroup: policyv1beta1.FSGroupStrategyOptions{
				Rule: policyv1beta1.FSGroupStrategyRunAsAny,
			},
			ReadOnlyRootFilesystem: false,
		},
	}
}

func boolPtr(b bool) *bool {
	return &b
}
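
This file is deleted outright: PodSecurityPolicy was removed in Kubernetes 1.25, and its replacement, the built-in Pod Security Admission controller, is driven by namespace labels rather than RBAC-bound policy objects. A rough sketch of opting a namespace into the restricted profile (the label keys are the documented pod-security.kubernetes.io ones; the helper itself is hypothetical):

	// enforceRestricted labels a namespace so Pod Security Admission enforces
	// the "restricted" profile (the successor to binding a restrictive PSP).
	func enforceRestricted(ctx context.Context, c clientset.Interface, ns string) error {
		namespace, err := c.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if namespace.Labels == nil {
			namespace.Labels = map[string]string{}
		}
		namespace.Labels["pod-security.kubernetes.io/enforce"] = "restricted"
		namespace.Labels["pod-security.kubernetes.io/warn"] = "restricted"
		_, err = c.CoreV1().Namespaces().Update(ctx, namespace, metav1.UpdateOptions{})
		return err
	}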
@@ -83,7 +83,6 @@ type Framework struct {
	Namespace                *v1.Namespace   // Every test has at least one namespace unless creation is skipped
	namespacesToDelete       []*v1.Namespace // Some tests have more than one.
	NamespaceDeletionTimeout time.Duration
	SkipPrivilegedPSPBinding bool               // Whether to skip creating a binding to the privileged PSP in the test namespace
	NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level to apply to test namespaces.

	gatherer *ContainerResourceGatherer

@@ -545,10 +544,6 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
	// fail to create serviceAccount in it.
	f.AddNamespacesToDelete(ns)

	if err == nil && !f.SkipPrivilegedPSPBinding {
		CreatePrivilegedPSPBinding(f.ClientSet, ns.Name)
	}

	return ns, err
}

@@ -1,192 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"context"
	"fmt"
	"strings"
	"sync"

	v1 "k8s.io/api/core/v1"
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	clientset "k8s.io/client-go/kubernetes"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"

	// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
)

const (
	podSecurityPolicyPrivileged = "e2e-test-privileged-psp"

	// allowAny is the wildcard used to allow any profile.
	allowAny = "*"

	// allowedProfilesAnnotationKey specifies the allowed seccomp profiles.
	allowedProfilesAnnotationKey = "seccomp.security.alpha.kubernetes.io/allowedProfileNames"
)

var (
	isPSPEnabledOnce sync.Once
	isPSPEnabled     bool
)

// privilegedPSP creates a PodSecurityPolicy that allows everything.
func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
	allowPrivilegeEscalation := true
	return &policyv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Annotations: map[string]string{allowedProfilesAnnotationKey: allowAny},
		},
		Spec: policyv1beta1.PodSecurityPolicySpec{
			Privileged:               true,
			AllowPrivilegeEscalation: &allowPrivilegeEscalation,
			AllowedCapabilities:      []v1.Capability{"*"},
			Volumes:                  []policyv1beta1.FSType{policyv1beta1.All},
			HostNetwork:              true,
			HostPorts:                []policyv1beta1.HostPortRange{{Min: 0, Max: 65535}},
			HostIPC:                  true,
			HostPID:                  true,
			RunAsUser: policyv1beta1.RunAsUserStrategyOptions{
				Rule: policyv1beta1.RunAsUserStrategyRunAsAny,
			},
			SELinux: policyv1beta1.SELinuxStrategyOptions{
				Rule: policyv1beta1.SELinuxStrategyRunAsAny,
			},
			SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{
				Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny,
			},
			FSGroup: policyv1beta1.FSGroupStrategyOptions{
				Rule: policyv1beta1.FSGroupStrategyRunAsAny,
			},
			ReadOnlyRootFilesystem: false,
			AllowedUnsafeSysctls:   []string{"*"},
		},
	}
}

// IsPodSecurityPolicyEnabled returns true if PodSecurityPolicy is enabled. Otherwise false.
func IsPodSecurityPolicyEnabled(kubeClient clientset.Interface) bool {
	isPSPEnabledOnce.Do(func() {
		psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
			return
		}
		if psps == nil || len(psps.Items) == 0 {
			Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
			return
		}
		Logf("Found PodSecurityPolicies; testing pod creation to see if PodSecurityPolicy is enabled")
		testPod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{GenerateName: "psp-test-pod-"},
			Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "test", Image: imageutils.GetPauseImageName()}}},
		}
		dryRunPod, err := kubeClient.CoreV1().Pods("kube-system").Create(context.TODO(), testPod, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
		if err != nil {
			if strings.Contains(err.Error(), "PodSecurityPolicy") {
				Logf("PodSecurityPolicy error creating dryrun pod; assuming PodSecurityPolicy is enabled: %v", err)
				isPSPEnabled = true
			} else {
				Logf("Error creating dryrun pod; assuming PodSecurityPolicy is disabled: %v", err)
			}
			return
		}
		pspAnnotation, pspAnnotationExists := dryRunPod.Annotations["kubernetes.io/psp"]
		if !pspAnnotationExists {
			Logf("No PSP annotation exists on dry run pod; assuming PodSecurityPolicy is disabled")
			return
		}
		Logf("PSP annotation exists on dry run pod: %q; assuming PodSecurityPolicy is enabled", pspAnnotation)
		isPSPEnabled = true
	})
	return isPSPEnabled
}

var (
	privilegedPSPOnce sync.Once
)

// CreatePrivilegedPSPBinding creates the privileged PSP & role, and binds them
// to the default service account (and service account group) in the given namespace.
func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string) {
	if !IsPodSecurityPolicyEnabled(kubeClient) {
		return
	}
	// Create the privileged PSP & role
	privilegedPSPOnce.Do(func() {
		_, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), podSecurityPolicyPrivileged, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			// Privileged PSP was already created.
			ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged)
			return
		}

		psp := privilegedPSP(podSecurityPolicyPrivileged)
		_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{})
		if !apierrors.IsAlreadyExists(err) {
			ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
		}

		if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
			// Create the Role to bind it to the namespace.
			_, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
				ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
				Rules: []rbacv1.PolicyRule{{
					APIGroups:     []string{"extensions"},
					Resources:     []string{"podsecuritypolicies"},
					ResourceNames: []string{podSecurityPolicyPrivileged},
					Verbs:         []string{"use"},
				}},
			}, metav1.CreateOptions{})
			if !apierrors.IsAlreadyExists(err) {
				ExpectNoError(err, "Failed to create PSP role")
			}
		}
	})

	if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
		ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
			podSecurityPolicyPrivileged, namespace))
		err := e2eauth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
			podSecurityPolicyPrivileged,
			namespace,
			rbacv1.Subject{
				Kind:      rbacv1.ServiceAccountKind,
				Namespace: namespace,
				Name:      "default",
			},
			rbacv1.Subject{
				Kind:     rbacv1.GroupKind,
				APIGroup: rbacv1.GroupName,
				Name:     "system:serviceaccounts:" + namespace,
			},
		)
		ExpectNoError(err)
		ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
			serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
			schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
	}
}
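
Note the ClusterRole above grants "use" in the legacy "extensions" API group, while the deleted e2e auth test granted it in "policy". PSP moved from extensions/v1beta1 to policy/v1beta1 in Kubernetes 1.10, and the admission plugin's authorization check historically honored grants under either group name, which is presumably why this older helper still worked. A rule covering both would look like this (a sketch, not code from the tree):

	rule := rbacv1.PolicyRule{
		APIGroups:     []string{"policy", "extensions"},
		Resources:     []string{"podsecuritypolicies"},
		ResourceNames: []string{podSecurityPolicyPrivileged},
		Verbs:         []string{"use"},
	}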
@@ -32,14 +32,12 @@ import (
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"

@@ -65,11 +63,6 @@ const (
	maxValidSize string = "10Ei"
)

const (
	// ClusterRole name for the e2e test privileged Pod Security Policy user
	podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)

// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
	cmd := fmt.Sprintf("ls -l %s", filePath)

@@ -417,54 +410,6 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName
	return pod
}

// PrivilegedTestPSPClusterRoleBinding tests Pod Security Policy role bindings
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
	namespace string,
	teardown bool,
	saNames []string) {
	bindingString := "Binding"
	if teardown {
		bindingString = "Unbinding"
	}
	roleBindingClient := client.RbacV1().RoleBindings(namespace)
	for _, saName := range saNames {
		ginkgo.By(fmt.Sprintf("%v privileged Pod Security Policy to the service account %s", bindingString, saName))
		binding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "psp-" + saName,
				Namespace: namespace,
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      rbacv1.ServiceAccountKind,
					Name:      saName,
					Namespace: namespace,
				},
			},
			RoleRef: rbacv1.RoleRef{
				Kind:     "ClusterRole",
				Name:     podSecurityPolicyPrivilegedClusterRoleName,
				APIGroup: "rbac.authorization.k8s.io",
			},
		}

		roleBindingClient.Delete(context.TODO(), binding.GetName(), metav1.DeleteOptions{})
		err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
			_, err := roleBindingClient.Get(context.TODO(), binding.GetName(), metav1.GetOptions{})
			return apierrors.IsNotFound(err), nil
		})
		framework.ExpectNoError(err, "Timed out waiting for RBAC binding %s deletion: %v", binding.GetName(), err)

		if teardown {
			continue
		}

		_, err = roleBindingClient.Create(context.TODO(), binding, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)

	}
}
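
// A typical (hypothetical) invocation from a storage suite: bind before the
// tests run, then tear the bindings down again afterwards.
//
//	PrivilegedTestPSPClusterRoleBinding(client, ns, false /* teardown */, []string{"default"})
//	// ... run tests ...
//	PrivilegedTestPSPClusterRoleBinding(client, ns, true /* teardown */, []string{"default"})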

func isSudoPresent(nodeIP string, provider string) bool {
	framework.Logf("Checking if sudo command is present")
	sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)