kubernetes/test/e2e_node/log_path_test.go
Filipe Brandenburger e98ba5021e Skip log path tests when they are expected to fail.
The log path test is not expected to pass unless the Docker daemon is
using the JSON logging driver, since that is the driver that produces
the per-container JSON log files the test looks for. When the Docker
daemon is using the journald logging driver, there will be no JSON
files in the logging directories for the test to find.
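
As a rough illustration of how the logging driver can be detected, here
is a minimal standalone sketch assuming the Docker Engine Go client
(github.com/docker/docker/client); it is hypothetical, since the test
itself relies on the existing getDockerLoggingDriver() helper:

    // Sketch: ask the Docker daemon which logging driver it is using.
    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/client"
    )

    func main() {
        // Connect using the standard environment (DOCKER_HOST, etc.).
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            panic(err)
        }
        info, err := cli.Info(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Println(info.LoggingDriver) // e.g. "json-file" or "journald"
    }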

Furthermore, when SELinux support is enabled in the Docker daemon,
SELinux will prevent processes running inside Docker containers from
accessing the log files owned by Docker (which is exactly what this
test needs to do), so let's also skip the test when SELinux support
is enabled.
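
The SELinux side can be probed through the same endpoint; again a
hedged sketch assuming the Docker Engine Go client (the test itself
uses the isDockerSELinuxSupportEnabled() helper), where the daemon is
expected to report a "name=selinux" security option when started with
--selinux-enabled:

    // Sketch: check whether the daemon is running with SELinux support.
    package main

    import (
        "context"
        "fmt"
        "strings"

        "github.com/docker/docker/client"
    )

    func dockerSELinuxEnabled(ctx context.Context) (bool, error) {
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            return false, err
        }
        info, err := cli.Info(ctx)
        if err != nil {
            return false, err
        }
        for _, opt := range info.SecurityOptions {
            // Assumed format: entries such as "name=selinux" or "name=seccomp".
            if strings.Contains(opt, "selinux") {
                return true, nil
            }
        }
        return false, nil
    }

    func main() {
        enabled, err := dockerSELinuxEnabled(context.Background())
        fmt.Println(enabled, err)
    }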

Tested:

- With Docker daemon started using --log-driver=journald:

    S [SKIPPING] in Spec Setup (BeforeEach) [8.193 seconds]
    [k8s.io] ContainerLogPath
      Pod with a container
        printed log to stdout
          should print log to correct log path [BeforeEach]
          Jan  3 18:33:44.869: Skipping because Docker daemon is using a logging driver other than "json-file": journald

- With Docker daemon started using --selinux-enabled:

    S [SKIPPING] in Spec Setup (BeforeEach) [8.488 seconds]
    [k8s.io] ContainerLogPath
      Pod with a container
        printed log to stdout
          should print log to correct log path [BeforeEach]
          Jan  3 18:35:58.909: Skipping because Docker daemon is running with SELinux support enabled

- With Docker daemon started using the JSON logging driver and with SELinux disabled:

    • [SLOW TEST:16.352 seconds]  (passed)
    [k8s.io] ContainerLogPath
      Pod with a container
        printed log to stdout
          should print log to correct log path
    Ran 1 of 256 Specs in 36.428 seconds
    SUCCESS! -- 1 Passed | 0 Failed | 0 Pending | 255 Skipped
2018-01-19 10:51:13 -08:00

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_node

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubelet"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)
const (
	logString     = "This is the expected log content of this node e2e test"
	logPodName    = "logger-pod"
	logContName   = "logger-container"
	checkPodName  = "checker-pod"
	checkContName = "checker-container"
)
var _ = framework.KubeDescribe("ContainerLogPath", func() {
	f := framework.NewDefaultFramework("kubelet-container-log-path")

	Describe("Pod with a container", func() {
		Context("printed log to stdout", func() {
			BeforeEach(func() {
				if framework.TestContext.ContainerRuntime == "docker" {
					// Container Log Path support requires the JSON logging driver.
					// It does not work when the Docker daemon is logging to journald.
					d, err := getDockerLoggingDriver()
					framework.ExpectNoError(err)
					if d != "json-file" {
						framework.Skipf("Skipping because Docker daemon is using a logging driver other than \"json-file\": %s", d)
					}

					// Even if JSON logging is in use, this test fails if SELinux support
					// is enabled, since the isolation provided by the SELinux policy
					// prevents processes running inside Docker containers (under SELinux
					// type svirt_lxc_net_t) from accessing the log files owned by Docker
					// (which are labeled with the container_var_lib_t type).
					//
					// Therefore, let's also skip this test when running with SELinux
					// support enabled.
					e, err := isDockerSELinuxSupportEnabled()
					framework.ExpectNoError(err)
					if e {
						framework.Skipf("Skipping because Docker daemon is running with SELinux support enabled")
					}
				}
			})
It("should print log to correct log path", func() {
podClient := f.PodClient()
ns := f.Namespace.Name
logDirVolumeName := "log-dir-vol"
logDir := kubelet.ContainerLogsDir
logPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: logPodName,
},
Spec: v1.PodSpec{
// this pod is expected to exit successfully
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: busyboxImage,
Name: logContName,
Command: []string{"sh", "-c", "echo " + logString},
},
},
},
}
podClient.Create(logPod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, logPodName, ns)
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)
// get containerID from created Pod
createdLogPod, err := podClient.Get(logPodName, metav1.GetOptions{})
logConID := kubecontainer.ParseContainerID(createdLogPod.Status.ContainerStatuses[0].ContainerID)
framework.ExpectNoError(err, "Failed to get pod: %s", logPodName)
expectedlogFile := logDir + "/" + logPodName + "_" + ns + "_" + logContName + "-" + logConID.ID + ".log"
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
checkPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: checkPodName,
},
Spec: v1.PodSpec{
// this pod is expected to exit successfully
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: busyboxImage,
Name: checkContName,
// If we find expected log file and contains right content, exit 0
// else, keep checking until test timeout
Command: []string{"sh", "-c", "while true; do if [ -e " + expectedlogFile + " ] && grep -q " + logString + " " + expectedlogFile + "; then exit 0; fi; sleep 1; done"},
VolumeMounts: []v1.VolumeMount{
{
Name: logDirVolumeName,
// mount ContainerLogsDir to the same path in container
MountPath: expectedlogFile,
ReadOnly: true,
},
},
},
},
Volumes: []v1.Volume{
{
Name: logDirVolumeName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: expectedlogFile,
Type: hostPathType,
},
},
},
},
},
}
podClient.Create(checkPod)
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, checkPodName, ns)
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", checkPodName)
})
})
})
})