add e2e regression tests for the kubelet being secure
This commit is contained in:
parent 52603a78ab
commit 8094e1c681

test/e2e/auth/node_authn.go (new file, 107 lines)
@@ -0,0 +1,107 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {

	f := framework.NewDefaultFramework("node-authn")
	var ns string
	var nodeIPs []string
	BeforeEach(func() {
		ns = f.Namespace.Name

		nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		Expect(len(nodeList.Items)).NotTo(BeZero())

		pickedNode := nodeList.Items[0]
		nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
		// The pods running in the cluster can see the internal addresses.
		nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)

		// make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
		sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get("default", metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())
		Expect(len(sa.Secrets)).NotTo(BeZero())
	})

	It("The kubelet's main port 10250 should reject requests with no credentials", func() {
		pod := createNodeAuthTestPod(f)
		for _, nodeIP := range nodeIPs {
			// Anonymous authentication is disabled by default
			result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s:%v/metrics", "%{http_code}", nodeIP, ports.KubeletPort))
			Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
		}
	})

	It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
		By("create a new ServiceAccount for authentication")
		trueValue := true
		newSA := &v1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: ns,
Name: "node-auth-newSA",
|
			},
			AutomountServiceAccountToken: &trueValue,
		}
		_, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(newSA)
		Expect(err).NotTo(HaveOccurred())

		pod := createNodeAuthTestPod(f)

		for _, nodeIP := range nodeIPs {
			result := framework.RunHostCmdOrDie(ns,
				pod.Name,
				fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s:%v/metrics",
					"%{http_code}",
					"cat /var/run/secrets/kubernetes.io/serviceaccount/token",
					nodeIP, ports.KubeletPort))
			Expect(result).NotTo(Or(Equal("401"), Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server")
		}
	})
})

func createNodeAuthTestPod(f *framework.Framework) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "test-node-authn-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "test-node-authn",
				Image: imageutils.GetE2EImage(imageutils.Hostexec),
Command: []string{"sleep 3600"},
|
			}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}

	return f.PodClient().CreateSync(pod)
}
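For reference, here is a minimal standalone sketch (not part of the commit) of the same two checks done natively in Go instead of shelling out to curl from a hostexec pod; the node IP and token path below are illustrative assumptions:

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// kubeletStatus fetches the kubelet's /metrics endpoint and returns the HTTP
// status code; an empty tokenPath sends an anonymous request.
func kubeletStatus(nodeIP, tokenPath string) (int, error) {
	// Equivalent of curl's -k flag: the tests only inspect status codes,
	// not certificate validity.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	req, err := http.NewRequest("GET", fmt.Sprintf("https://%s:10250/metrics", nodeIP), nil)
	if err != nil {
		return 0, err
	}
	if tokenPath != "" {
		token, err := ioutil.ReadFile(tokenPath)
		if err != nil {
			return 0, err
		}
		req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(string(token)))
	}
	resp, err := client.Do(req)
	if err != nil {
		return 0, err
	}
	resp.Body.Close()
	return resp.StatusCode, nil
}

func main() {
	// Anonymous request: a secured kubelet should answer 401 or 403.
	fmt.Println(kubeletStatus("10.0.0.1", ""))
	// With a mounted ServiceAccount token the kubelet should delegate the
	// credential check to the API server and answer 200.
	fmt.Println(kubeletStatus("10.0.0.1", "/var/run/secrets/kubernetes.io/serviceaccount/token"))
}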
test/e2e/framework/util.go

@@ -492,7 +492,7 @@ type usageDataPerContainer struct {
 }
 
 func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
-	client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
+	client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
 	if err != nil {
 		return "", err
 	}
@@ -4494,7 +4494,7 @@ func isElementOf(podUID types.UID, pods *v1.PodList) bool {
 const proxyTimeout = 2 * time.Minute
 
 // NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
-func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
+func NodeProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
 	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
 	// This will leak a goroutine if proxy hangs. #22165
 	var result restclient.Result
@@ -4503,7 +4503,7 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.
 	result = c.CoreV1().RESTClient().Get().
 		Resource("nodes").
 		SubResource("proxy").
-		Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
+		Name(fmt.Sprintf("%v:%v", node, port)).
 		Suffix(endpoint).
 		Do()
 
@@ -4531,7 +4531,7 @@ func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, err
 
 func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
 	result := &v1.PodList{}
-	client, err := NodeProxyRequest(c, node, resource)
+	client, err := NodeProxyRequest(c, node, resource, ports.KubeletPort)
 	if err != nil {
 		return &v1.PodList{}, err
 	}
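A hedged usage sketch of the updated helper (the stats/summary endpoint and helper name are illustrative, not part of the commit): callers now name the kubelet port explicitly, so the same function serves both the secure-port helpers above and the closed-port probes in the new lifecycle test below.

package framework_test

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/test/e2e/framework"
)

// kubeletSummaryRaw proxies through the API server to the kubelet's secure
// port; passing ports.KubeletReadOnlyPort instead would probe whether the
// read-only port is disabled, as kubelet_security.go does.
func kubeletSummaryRaw(c clientset.Interface, nodeName string) ([]byte, error) {
	result, err := framework.NodeProxyRequest(c, nodeName, "stats/summary", ports.KubeletPort)
	if err != nil {
		return nil, err
	}
	return result.Raw()
}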
test/e2e/lifecycle/kubelet_security.go (new file, 85 lines)

@@ -0,0 +1,85 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lifecycle

import (
	"fmt"
	"net"
	"net/http"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
	f := framework.NewDefaultFramework("kubelet-security")

	var node *v1.Node
	var nodeName string

	BeforeEach(func() {
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(len(nodes.Items)).NotTo(BeZero())
		node = &nodes.Items[0]
		nodeName = node.Name
	})

	// make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
	It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
		result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
		Expect(err).NotTo(HaveOccurred())

		var statusCode int
		result.StatusCode(&statusCode)
		Expect(statusCode).NotTo(Equal(http.StatusOK))
	})
	It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
		result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
		Expect(err).NotTo(HaveOccurred())

		var statusCode int
		result.StatusCode(&statusCode)
		Expect(statusCode).NotTo(Equal(http.StatusOK))
	})

	// make sure kubelet readonly (10255) and cadvisor (4194) ports are closed on the public IP address
	disabledPorts := []int{ports.KubeletReadOnlyPort, 4194}
	for _, port := range disabledPorts {
		port := port // capture the range variable; the It body runs after the loop finishes
		It(fmt.Sprintf("should not have port %d open on any of its public IP addresses", port), func() {
			portClosedTest(f, node, port)
		})
	}
})

// checks whether the target port is closed
func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
	nodeAddrs := framework.GetNodeAddresses(pickNode, v1.NodeExternalIP)
	Expect(len(nodeAddrs)).NotTo(BeZero())

	for _, addr := range nodeAddrs {
		conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
		if err == nil {
			conn.Close()
			framework.Failf("port %d is not disabled", port)
		}
	}
}
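The same closed-port probe can be sketched as a standalone program outside the e2e framework (the address below is a placeholder, and the timeout is shortened from the test's one minute):

package main

import (
	"fmt"
	"net"
	"time"
)

// portClosed reports whether nothing accepts TCP connections on addr:port.
func portClosed(addr string, port int) bool {
	conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 5*time.Second)
	if err != nil {
		return true // refused or timed out: nothing is serving
	}
	conn.Close()
	return false
}

func main() {
	// 10255 is the kubelet read-only port, 4194 the cadvisor port.
	for _, port := range []int{10255, 4194} {
		fmt.Printf("port %d closed: %v\n", port, portClosed("203.0.113.10", port))
	}
}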