Move scalability, upgrade, and common packages to framework/log
This is part of the transition to using framework/log instead of the Logf helper inside the framework package. This helps reduce import size and avoid import cycles when importing the framework or its subpackages.
parent d5245b9563
commit afc66a5487
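The change applied throughout the diff below is mechanical: each touched file adds an import of the new log package under the e2elog alias and rewrites its framework.Logf call sites (including LogFunc: framework.Logf fields in the density and load test configs) to e2elog.Logf, while Failf, ExpectNoError, and the other framework helpers stay where they are. A minimal sketch of the new call-site shape follows; the helper name is hypothetical and only the import and the Logf call mirror the commit:

    package common

    import (
    	// Logging now comes from the dedicated log package, imported under
    	// the e2elog alias, instead of the top-level test/e2e/framework package.
    	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    )

    // logRestartCount is a hypothetical helper used only for illustration.
    // Before this commit the body would have called framework.Logf; the
    // printf-style signature of Logf is unchanged.
    func logRestartCount(podName string, restartCount int) {
    	e2elog.Logf("Restart count of pod %s is now %d", podName, restartCount)
    }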
@@ -78,6 +78,7 @@ go_library(
 "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/deployment:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
 "//test/e2e/framework/volume:go_default_library",
 "//test/utils:go_default_library",
@@ -24,7 +24,7 @@ import (
 "time"

 autoscalingv1 "k8s.io/api/autoscaling/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/util/intstr"
@@ -33,6 +33,7 @@ import (
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/replicaset"
 testutils "k8s.io/kubernetes/test/utils"

@@ -170,19 +171,19 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl

 // ConsumeCPU consumes given number of CPU
 func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
-framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
+e2elog.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
 rc.cpu <- millicores
 }

 // ConsumeMem consumes given number of Mem
 func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
-framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
+e2elog.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
 rc.mem <- megabytes
 }

 // ConsumeMem consumes given number of custom metric
 func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
-framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
+e2elog.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
 rc.customMetric <- amount
 }

@@ -195,13 +196,13 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() {
 for {
 select {
 case millicores = <-rc.cpu:
-framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
+e2elog.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
 case <-time.After(sleepTime):
-framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
+e2elog.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
 rc.sendConsumeCPURequest(millicores)
 sleepTime = rc.sleepTime
 case <-rc.stopCPU:
-framework.Logf("RC %s: stopping CPU consumer", rc.name)
+e2elog.Logf("RC %s: stopping CPU consumer", rc.name)
 return
 }
 }
@@ -216,13 +217,13 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() {
 for {
 select {
 case megabytes = <-rc.mem:
-framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
+e2elog.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
 case <-time.After(sleepTime):
-framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
+e2elog.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
 rc.sendConsumeMemRequest(megabytes)
 sleepTime = rc.sleepTime
 case <-rc.stopMem:
-framework.Logf("RC %s: stopping mem consumer", rc.name)
+e2elog.Logf("RC %s: stopping mem consumer", rc.name)
 return
 }
 }
@@ -237,13 +238,13 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
 for {
 select {
 case delta = <-rc.customMetric:
-framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
+e2elog.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
 case <-time.After(sleepTime):
-framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
+e2elog.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
 rc.sendConsumeCustomMetric(delta)
 sleepTime = rc.sleepTime
 case <-rc.stopCustomMetric:
-framework.Logf("RC %s: stopping metric consumer", rc.name)
+e2elog.Logf("RC %s: stopping metric consumer", rc.name)
 return
 }
 }
@@ -263,10 +264,10 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
 Param("millicores", strconv.Itoa(millicores)).
 Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
-framework.Logf("ConsumeCPU URL: %v", *req.URL())
+e2elog.Logf("ConsumeCPU URL: %v", *req.URL())
 _, err = req.DoRaw()
 if err != nil {
-framework.Logf("ConsumeCPU failure: %v", err)
+e2elog.Logf("ConsumeCPU failure: %v", err)
 return false, nil
 }
 return true, nil
@@ -290,10 +291,10 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
 Param("megabytes", strconv.Itoa(megabytes)).
 Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
-framework.Logf("ConsumeMem URL: %v", *req.URL())
+e2elog.Logf("ConsumeMem URL: %v", *req.URL())
 _, err = req.DoRaw()
 if err != nil {
-framework.Logf("ConsumeMem failure: %v", err)
+e2elog.Logf("ConsumeMem failure: %v", err)
 return false, nil
 }
 return true, nil
@@ -318,10 +319,10 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 Param("delta", strconv.Itoa(delta)).
 Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
-framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
+e2elog.Logf("ConsumeCustomMetric URL: %v", *req.URL())
 _, err = req.DoRaw()
 if err != nil {
-framework.Logf("ConsumeCustomMetric failure: %v", err)
+e2elog.Logf("ConsumeCustomMetric failure: %v", err)
 return false, nil
 }
 return true, nil
@@ -366,7 +367,7 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
 interval := 20 * time.Second
 err := wait.PollImmediate(interval, duration, func() (bool, error) {
 replicas := rc.GetReplicas()
-framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
+e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
 return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
 })
 framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
@@ -380,12 +381,12 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
 interval := 10 * time.Second
 err := wait.PollImmediate(interval, duration, func() (bool, error) {
 replicas := rc.GetReplicas()
-framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
+e2elog.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
 as, err := rc.GetHpa(hpaName)
 if err != nil {
-framework.Logf("Error getting HPA: %s", err)
+e2elog.Logf("Error getting HPA: %s", err)
 } else {
-framework.Logf("HPA status: %+v", as.Status)
+e2elog.Logf("HPA status: %+v", as.Status)
 }
 if replicas < minDesiredReplicas {
 return false, fmt.Errorf("number of replicas below target")
@@ -397,7 +398,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
 })
 // The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
 if err == wait.ErrWaitTimeout {
-framework.Logf("Number of replicas was stable over %v", duration)
+e2elog.Logf("Number of replicas was stable over %v", duration)
 return
 }
 framework.ExpectNoErrorWithOffset(1, err)
@@ -29,6 +29,7 @@ import (
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 "k8s.io/kubernetes/pkg/kubelet/events"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"

 . "github.com/onsi/ginkgo"
@@ -74,7 +75,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 startedTime, err := getContainerStartedTime(p, probTestContainerName)
 framework.ExpectNoError(err)

-framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
+e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
 initialDelay := probTestInitialDelaySeconds * time.Second
 if readyTime.Sub(startedTime) < initialDelay {
 framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
@@ -484,14 +485,14 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
 // 'Terminated' which can cause indefinite blocking.)
 framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
 fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
-framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
+e2elog.Logf("Started pod %s in namespace %s", pod.Name, ns)

 // Check the pod's current state and verify that restartCount is present.
 By("checking the pod's current state and verifying that restartCount is present")
 pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
 initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
-framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
+e2elog.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)

 // Wait for the restart state to be as desired.
 deadline := time.Now().Add(timeout)
@@ -502,7 +503,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
 restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
 if restartCount != lastRestartCount {
-framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
+e2elog.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
 ns, pod.Name, restartCount, time.Since(start))
 if restartCount < lastRestartCount {
 framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
@@ -18,13 +18,15 @@ package common

 import (
 "fmt"
-"k8s.io/api/core/v1"
+"time"
+
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
-"time"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -658,7 +660,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
 By("Failing liveness probe")
 stdout, stderr, err := f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("rm %v", volumeMount))

-framework.Logf("Pod exec output: %v / %v", stdout, stderr)
+e2elog.Logf("Pod exec output: %v / %v", stdout, stderr)
 Expect(err).ToNot(HaveOccurred(), "while failing liveness probe")

 // Check that container has restarted
@@ -671,10 +673,10 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
 }
 for _, status := range pod.Status.ContainerStatuses {
 if status.Name == pod.Spec.Containers[0].Name {
-framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
+e2elog.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
 restarts = status.RestartCount
 if restarts > 0 {
-framework.Logf("Container has restart count: %v", restarts)
+e2elog.Logf("Container has restart count: %v", restarts)
 return true, nil
 }
 }
@@ -686,7 +688,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
 // Fix liveness probe
 By("Rewriting the file")
 stdout, _, err = f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("echo test-after > %v", volumeMount))
-framework.Logf("Pod exec output: %v", stdout)
+e2elog.Logf("Pod exec output: %v", stdout)
 Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file")

 // Wait for container restarts to stabilize
@@ -703,13 +705,13 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
 if status.RestartCount == restarts {
 stableCount++
 if stableCount > stableThreshold {
-framework.Logf("Container restart has stabilized")
+e2elog.Logf("Container restart has stabilized")
 return true, nil
 }
 } else {
 restarts = status.RestartCount
 stableCount = 0
-framework.Logf("Container has restart count: %v", restarts)
+e2elog.Logf("Container has restart count: %v", restarts)
 }
 break
 }
@@ -22,7 +22,7 @@ import (
 "strconv"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
@@ -31,6 +31,7 @@ import (
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 "k8s.io/kubernetes/pkg/client/conditions"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 . "github.com/onsi/ginkgo"
@@ -87,7 +88,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 },
 },
 }
-framework.Logf("PodSpec: initContainers in spec.initContainers")
+e2elog.Logf("PodSpec: initContainers in spec.initContainers")
 startedPod := podClient.Create(pod)
 w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
 Expect(err).NotTo(HaveOccurred(), "error watching a pod")
@@ -158,7 +159,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 },
 },
 }
-framework.Logf("PodSpec: initContainers in spec.initContainers")
+e2elog.Logf("PodSpec: initContainers in spec.initContainers")
 startedPod := podClient.Create(pod)
 w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
 Expect(err).NotTo(HaveOccurred(), "error watching a pod")
@@ -230,7 +231,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 },
 },
 }
-framework.Logf("PodSpec: initContainers in spec.initContainers")
+e2elog.Logf("PodSpec: initContainers in spec.initContainers")
 startedPod := podClient.Create(pod)
 w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
 Expect(err).NotTo(HaveOccurred(), "error watching a pod")
@@ -280,7 +281,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 if status.RestartCount < 3 {
 return false, nil
 }
-framework.Logf("init container has failed twice: %#v", t)
+e2elog.Logf("init container has failed twice: %#v", t)
 // TODO: more conditions
 return true, nil
 default:
@@ -347,7 +348,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 },
 },
 }
-framework.Logf("PodSpec: initContainers in spec.initContainers")
+e2elog.Logf("PodSpec: initContainers in spec.initContainers")
 startedPod := podClient.Create(pod)

 w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
@@ -28,6 +28,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"

 . "github.com/onsi/ginkgo"
@@ -115,23 +116,23 @@ var _ = framework.KubeDescribe("NodeLease", func() {
 if currentHeartbeatTime == lastHeartbeatTime {
 if currentObserved.Sub(lastObserved) > 2*leaseDuration {
 // heartbeat hasn't changed while watching for at least 2*leaseDuration, success!
-framework.Logf("node status heartbeat is unchanged for %s, was waiting for at least %s, success!", currentObserved.Sub(lastObserved), 2*leaseDuration)
+e2elog.Logf("node status heartbeat is unchanged for %s, was waiting for at least %s, success!", currentObserved.Sub(lastObserved), 2*leaseDuration)
 return true, nil
 }
-framework.Logf("node status heartbeat is unchanged for %s, waiting for %s", currentObserved.Sub(lastObserved), 2*leaseDuration)
+e2elog.Logf("node status heartbeat is unchanged for %s, waiting for %s", currentObserved.Sub(lastObserved), 2*leaseDuration)
 return false, nil
 }

 if currentHeartbeatTime.Sub(lastHeartbeatTime) >= leaseDuration {
 // heartbeat time changed, but the diff was greater than leaseDuration, success!
-framework.Logf("node status heartbeat changed in %s, was waiting for at least %s, success!", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
+e2elog.Logf("node status heartbeat changed in %s, was waiting for at least %s, success!", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
 return true, nil
 }

 if !apiequality.Semantic.DeepEqual(lastStatus, currentStatus) {
 // heartbeat time changed, but there were relevant changes in the status, keep waiting
-framework.Logf("node status heartbeat changed in %s (with other status changes), waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
-framework.Logf("%s", diff.ObjectReflectDiff(lastStatus, currentStatus))
+e2elog.Logf("node status heartbeat changed in %s (with other status changes), waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
+e2elog.Logf("%s", diff.ObjectReflectDiff(lastStatus, currentStatus))
 lastHeartbeatTime = currentHeartbeatTime
 lastObserved = currentObserved
 lastStatus = currentStatus
@@ -26,7 +26,7 @@ import (

 "golang.org/x/net/websocket"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/runtime"
@@ -40,6 +40,7 @@ import (
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 "k8s.io/kubernetes/pkg/kubelet"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 "github.com/onsi/ginkgo"
@@ -68,14 +69,14 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
 p, err := podClient.Get(pod.Name, metav1.GetOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get pod %q", pod.Name)
 if p.Status.HostIP != "" {
-framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
+e2elog.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
 break
 }
 if time.Since(t) >= hostIPTimeout {
 framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
 p.Name, time.Since(t).Seconds())
 }
-framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
+e2elog.Logf("Retrying to get the hostIP of pod %s", p.Name)
 time.Sleep(5 * time.Second)
 }
 }
@@ -117,13 +118,13 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
 status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
 if !ok {
-framework.Logf("getRestartDelay: status missing")
+e2elog.Logf("getRestartDelay: status missing")
 continue
 }

 // the only case this happens is if this is the first time the Pod is running and there is no "Last State".
 if status.LastTerminationState.Terminated == nil {
-framework.Logf("Container's last state is not \"Terminated\".")
+e2elog.Logf("Container's last state is not \"Terminated\".")
 continue
 }

@@ -150,7 +151,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
 } else {
 startedAt = status.LastTerminationState.Terminated.StartedAt.Time
 }
-framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, previousFinishedAt, startedAt, startedAt.Sub(previousFinishedAt))
+e2elog.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, previousFinishedAt, startedAt, startedAt.Sub(previousFinishedAt))
 return startedAt.Sub(previousFinishedAt), nil
 }
 }
@@ -232,10 +233,10 @@ var _ = framework.KubeDescribe("Pods", func() {
 if err == nil {
 select {
 case listCompleted <- true:
-framework.Logf("observed the pod list")
+e2elog.Logf("observed the pod list")
 return podList, err
 default:
-framework.Logf("channel blocked")
+e2elog.Logf("channel blocked")
 }
 }
 return podList, err
@@ -288,7 +289,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
 podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
 if err != nil {
-framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
+e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
 return false, nil
 }
 for _, kubeletPod := range podList.Items {
@@ -296,12 +297,12 @@ var _ = framework.KubeDescribe("Pods", func() {
 continue
 }
 if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
-framework.Logf("deletion has not yet been observed")
+e2elog.Logf("deletion has not yet been observed")
 return false, nil
 }
 return true, nil
 }
-framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
+e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
 return true, nil
 })).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")

@@ -317,7 +318,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 lastPod = event.Object.(*v1.Pod)
 deleted = true
 case watch.Error:
-framework.Logf("received a watch error: %v", event.Object)
+e2elog.Logf("received a watch error: %v", event.Object)
 framework.Failf("watch closed with error")
 }
 case <-timer:
@@ -389,7 +390,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 pods, err = podClient.List(options)
 gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
 gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
-framework.Logf("Pod update OK")
+e2elog.Logf("Pod update OK")
 })

 /*
@@ -801,7 +802,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 podReady := podClient.PodIsReady(podName)
 res := expectReady == podReady
 if !res {
-framework.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
+e2elog.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
 }
 return res, nil
 })).NotTo(gomega.HaveOccurred())
@@ -21,10 +21,11 @@ import (
 "path"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/pkg/kubelet/images"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -153,7 +154,7 @@ while true; do sleep 1; done
 Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))

 By("the termination message should be set")
-framework.Logf("Expected: %v to match Container's Termination Message: %v --", expectedMsg, status.State.Terminated.Message)
+e2elog.Logf("Expected: %v to match Container's Termination Message: %v --", expectedMsg, status.State.Terminated.Message)
 Expect(status.State.Terminated.Message).Should(expectedMsg)

 By("delete the container")
@@ -344,7 +345,7 @@ while true; do sleep 1; done
 break
 }
 if i < flakeRetry {
-framework.Logf("No.%d attempt failed: %v, retrying...", i, err)
+e2elog.Logf("No.%d attempt failed: %v, retrying...", i, err)
 } else {
 framework.Failf("All %d attempts failed: %v", flakeRetry, err)
 }
@@ -20,10 +20,11 @@ import (
 "fmt"
 "strings"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 . "github.com/onsi/ginkgo"
@@ -189,7 +190,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
 }

-framework.Logf("Got logs for pod %q: %q", podName, logs)
+e2elog.Logf("Got logs for pod %q: %q", podName, logs)
 if !strings.Contains(logs, "Operation not permitted") {
 framework.Failf("unprivileged container shouldn't be able to create dummy device")
 }
@@ -38,6 +38,7 @@ go_library(
 "//staging/src/k8s.io/client-go/transport:go_default_library",
 "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/timer:go_default_library",
 "//test/utils:go_default_library",
 "//test/utils/image:go_default_library",
@@ -26,7 +26,7 @@ import (
 "sync"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
@@ -43,6 +43,7 @@ import (
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/apis/extensions"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/timer"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -145,7 +146,7 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
 controllerMem = math.MaxUint64
 schedulerCPU := math.MaxFloat32
 schedulerMem = math.MaxUint64
-framework.Logf("Setting resource constraints for provider: %s", framework.TestContext.Provider)
+e2elog.Logf("Setting resource constraints for provider: %s", framework.TestContext.Provider)
 if framework.ProviderIs("kubemark") {
 if numNodes <= 5 {
 apiserverCPU = 0.35
@@ -283,7 +284,7 @@ func logPodStartupStatus(
 }
 // Log status of the pods.
 startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods)
-framework.Logf(startupStatus.String("Density"))
+e2elog.Logf(startupStatus.String("Density"))
 // Compute scheduling throughput for the latest time period.
 throughput := float64(startupStatus.Scheduled-lastScheduledCount) / float64(period/time.Second)
 *scheduleThroughputs = append(*scheduleThroughputs, throughput)
@@ -328,8 +329,8 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
 startupTime := time.Since(startTime)
 close(logStopCh)
 close(schedulerProfilingStopCh)
-framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
-framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
+e2elog.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
+e2elog.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
 replicationCtrlStartupPhase.End()

 // Grabbing scheduler memory profile after cluster saturation finished.
@@ -358,7 +359,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
 }
 sort.Strings(nodeNames)
 for _, node := range nodeNames {
-framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
+e2elog.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
 }
 defer printPodAllocationPhase.End()
 return startupTime
@@ -431,7 +432,7 @@ var _ = SIGDescribe("Density", func() {
 NumberOfPods: totalPods,
 Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second),
 }
-framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
+e2elog.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))

 summaries := make([]framework.TestDataSummary, 0, 2)
 // Verify latency metrics.
@@ -521,7 +522,7 @@ var _ = SIGDescribe("Density", func() {
 framework.ExpectNoError(framework.ResetMetrics(c))
 framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))

-framework.Logf("Listing nodes for easy debugging:\n")
+e2elog.Logf("Listing nodes for easy debugging:\n")
 for _, node := range nodes.Items {
 var internalIP, externalIP string
 for _, address := range node.Status.Addresses {
@@ -532,7 +533,7 @@ var _ = SIGDescribe("Density", func() {
 externalIP = address.Address
 }
 }
-framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
+e2elog.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
 }

 // Start apiserver CPU profile gatherer with frequency based on cluster size.
@@ -661,7 +662,7 @@ var _ = SIGDescribe("Density", func() {
 Client: clients[i],
 Name: secretName,
 Namespace: nsName,
-LogFunc: framework.Logf,
+LogFunc: e2elog.Logf,
 })
 secretNames = append(secretNames, secretName)
 }
@@ -673,7 +674,7 @@ var _ = SIGDescribe("Density", func() {
 Client: clients[i],
 Name: configMapName,
 Namespace: nsName,
-LogFunc: framework.Logf,
+LogFunc: e2elog.Logf,
 })
 configMapNames = append(configMapNames, configMapName)
 }
@@ -693,7 +694,7 @@ var _ = SIGDescribe("Density", func() {
 MemRequest: nodeMemCapacity / 100,
 MaxContainerFailures: &MaxContainerFailures,
 Silent: true,
-LogFunc: framework.Logf,
+LogFunc: e2elog.Logf,
 SecretNames: secretNames,
 ConfigMapNames: configMapNames,
 ServiceAccountTokenProjections: itArg.svcacctTokenProjectionsPerPod,
@@ -745,7 +746,7 @@ var _ = SIGDescribe("Density", func() {
 Client: f.ClientSet,
 Name: fmt.Sprintf("density-daemon-%v", i),
 Namespace: f.Namespace.Name,
-LogFunc: framework.Logf,
+LogFunc: e2elog.Logf,
 })
 }
 e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, &scheduleThroughputs)
@@ -815,7 +816,7 @@ var _ = SIGDescribe("Density", func() {
 AddFunc: func(obj interface{}) {
 p, ok := obj.(*v1.Pod)
 if !ok {
-framework.Logf("Failed to cast observed object to *v1.Pod.")
+e2elog.Logf("Failed to cast observed object to *v1.Pod.")
 }
 Expect(ok).To(Equal(true))
 go checkPod(p)
@@ -823,7 +824,7 @@ var _ = SIGDescribe("Density", func() {
 UpdateFunc: func(oldObj, newObj interface{}) {
 p, ok := newObj.(*v1.Pod)
 if !ok {
-framework.Logf("Failed to cast observed object to *v1.Pod.")
+e2elog.Logf("Failed to cast observed object to *v1.Pod.")
 }
 Expect(ok).To(Equal(true))
 go checkPod(p)
@@ -836,7 +837,7 @@ var _ = SIGDescribe("Density", func() {
 }
 for latencyPodsIteration := 0; latencyPodsIteration < latencyPodsIterations; latencyPodsIteration++ {
 podIndexOffset := latencyPodsIteration * nodeCount
-framework.Logf("Creating %d latency pods in range [%d, %d]", nodeCount, podIndexOffset+1, podIndexOffset+nodeCount)
+e2elog.Logf("Creating %d latency pods in range [%d, %d]", nodeCount, podIndexOffset+1, podIndexOffset+nodeCount)

 watchTimesLen := len(watchTimes)

@@ -886,7 +887,7 @@ var _ = SIGDescribe("Density", func() {
 }
 for node, count := range nodeToLatencyPods {
 if count > 1 {
-framework.Logf("%d latency pods scheduled on %s", count, node)
+e2elog.Logf("%d latency pods scheduled on %s", count, node)
 }
 }
 }
@@ -934,22 +935,22 @@ var _ = SIGDescribe("Density", func() {
 for name, create := range createTimes {
 sched, ok := scheduleTimes[name]
 if !ok {
-framework.Logf("Failed to find schedule time for %v", name)
+e2elog.Logf("Failed to find schedule time for %v", name)
 missingMeasurements++
 }
 run, ok := runTimes[name]
 if !ok {
-framework.Logf("Failed to find run time for %v", name)
+e2elog.Logf("Failed to find run time for %v", name)
 missingMeasurements++
 }
 watch, ok := watchTimes[name]
 if !ok {
-framework.Logf("Failed to find watch time for %v", name)
+e2elog.Logf("Failed to find watch time for %v", name)
 missingMeasurements++
 }
 node, ok := nodeNames[name]
 if !ok {
-framework.Logf("Failed to find node for %v", name)
+e2elog.Logf("Failed to find node for %v", name)
 missingMeasurements++
 }

@@ -1035,5 +1036,5 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns,
 }
 framework.ExpectNoError(testutils.CreateRCWithRetries(c, ns, rc))
 framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
-framework.Logf("Found pod '%s' running", name)
+e2elog.Logf("Found pod '%s' running", name)
 }
@@ -28,7 +28,7 @@ import (
 "sync"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -50,6 +50,7 @@ import (
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/apis/extensions"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/timer"
 testutils "k8s.io/kubernetes/test/utils"

@@ -236,27 +237,27 @@ var _ = SIGDescribe("Load capacity", func() {
 serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation")
 defer serviceCreationPhase.End()
 if itArg.services {
-framework.Logf("Creating services")
+e2elog.Logf("Creating services")
 services := generateServicesForConfigs(configs)
 createService := func(i int) {
 defer GinkgoRecover()
 framework.ExpectNoError(testutils.CreateServiceWithRetries(clientset, services[i].Namespace, services[i]))
 }
 workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), createService)
-framework.Logf("%v Services created.", len(services))
+e2elog.Logf("%v Services created.", len(services))
 defer func(services []*v1.Service) {
 serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
 defer serviceCleanupPhase.End()
-framework.Logf("Starting to delete services...")
+e2elog.Logf("Starting to delete services...")
 deleteService := func(i int) {
 defer GinkgoRecover()
 framework.ExpectNoError(testutils.DeleteResourceWithRetries(clientset, api.Kind("Service"), services[i].Namespace, services[i].Name, nil))
 }
 workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), deleteService)
-framework.Logf("Services deleted")
+e2elog.Logf("Services deleted")
 }(services)
 } else {
-framework.Logf("Skipping service creation")
+e2elog.Logf("Skipping service creation")
 }
 serviceCreationPhase.End()
 // Create all secrets.
@@ -284,7 +285,7 @@ var _ = SIGDescribe("Load capacity", func() {
 Client: f.ClientSet,
 Name: daemonName,
 Namespace: f.Namespace.Name,
-LogFunc: framework.Logf,
+LogFunc: e2elog.Logf,
 }
 daemonConfig.Run()
 defer func(config *testutils.DaemonConfig) {
@@ -313,7 +314,7 @@ var _ = SIGDescribe("Load capacity", func() {
 // to make it possible to create/schedule them in the meantime.
 // Currently we assume <throughput> pods/second average throughput.
 // We may want to revisit it in the future.
-framework.Logf("Starting to create %v objects...", itArg.kind)
+e2elog.Logf("Starting to create %v objects...", itArg.kind)
 creatingTime := time.Duration(totalPods/throughput) * time.Second

 createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation"))
@@ -326,7 +327,7 @@ var _ = SIGDescribe("Load capacity", func() {
 // The expected number of created/deleted pods is totalPods/4 when scaling,
 // as each RC changes its size from X to a uniform random value in [X/2, 3X/2].
 scalingTime := time.Duration(totalPods/(4*throughput)) * time.Second
-framework.Logf("Starting to scale %v objects first time...", itArg.kind)
+e2elog.Logf("Starting to scale %v objects first time...", itArg.kind)
 scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
 By("============================================================================")

@@ -334,7 +335,7 @@ var _ = SIGDescribe("Load capacity", func() {
 // Currently we assume <throughput> pods/second average deletion throughput.
 // We may want to revisit it in the future.
 deletingTime := time.Duration(totalPods/throughput) * time.Second
-framework.Logf("Starting to delete %v objects...", itArg.kind)
+e2elog.Logf("Starting to delete %v objects...", itArg.kind)
 deleteAllResources(configs, deletingTime, testPhaseDurations.StartPhase(500, "load pods deletion"))
 })
 }
@@ -515,7 +516,7 @@ func GenerateConfigsForGroup(
 Client: nil, // this will be overwritten later
|
Client: nil, // this will be overwritten later
|
||||||
Name: secretName,
|
Name: secretName,
|
||||||
Namespace: namespace,
|
Namespace: namespace,
|
||||||
LogFunc: framework.Logf,
|
LogFunc: e2elog.Logf,
|
||||||
})
|
})
|
||||||
secretNames = append(secretNames, secretName)
|
secretNames = append(secretNames, secretName)
|
||||||
}
|
}
|
||||||
@ -527,7 +528,7 @@ func GenerateConfigsForGroup(
|
|||||||
Client: nil, // this will be overwritten later
|
Client: nil, // this will be overwritten later
|
||||||
Name: configMapName,
|
Name: configMapName,
|
||||||
Namespace: namespace,
|
Namespace: namespace,
|
||||||
LogFunc: framework.Logf,
|
LogFunc: e2elog.Logf,
|
||||||
})
|
})
|
||||||
configMapNames = append(configMapNames, configMapName)
|
configMapNames = append(configMapNames, configMapName)
|
||||||
}
|
}
|
||||||
@ -689,7 +690,7 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
framework.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
|
e2elog.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
|
||||||
if testutils.IsRetryableAPIError(err) {
|
if testutils.IsRetryableAPIError(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
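These call sites only swap the package qualifier, which works because the framework/log package exports a Logf with the same printf-style signature as framework.Logf. A minimal sketch of such a package is below, assuming a Ginkgo-backed writer and a timestamp prefix; the names and body are illustrative, not the actual test/e2e/framework/log source.

// Sketch of a standalone e2e logging helper with a framework.Logf-compatible
// signature. Assumption: the real package logs to the Ginkgo writer.
package e2elog

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
)

func nowStamp() string {
	return time.Now().Format(time.StampMilli)
}

// Logf prints a timestamped, printf-formatted line to the Ginkgo writer,
// mirroring the framework.Logf call sites migrated in this commit.
func Logf(format string, args ...interface{}) {
	fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": INFO: "+format+"\n", args...)
}

Because the signature matches, callers migrate by changing only the import and the qualifier, as the hunks above show.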
@@ -39,6 +39,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/gpu:go_default_library",
 "//test/e2e/framework/job:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/testfiles:go_default_library",
 "//test/e2e/scheduling:go_default_library",
 "//test/utils/image:go_default_library",
@@ -21,6 +21,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -79,7 +80,7 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
 func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
 // rely on the namespace deletion to clean up everything
 ginkgo.By("Logging container failures")
-framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
 }

 func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
@@ -29,6 +29,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/job:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
 "//test/e2e/upgrades:go_default_library",
 "//test/utils/image:go_default_library",
@@ -26,6 +26,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/upgrades"
 )

@@ -132,7 +133,7 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m
 nodeNames := make([]string, 0)
 for _, node := range nodeList.Items {
 if len(node.Spec.Taints) != 0 {
-framework.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
+e2elog.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
 }
 // DaemonSet Pods are expected to run on all the nodes in e2e.
 nodeNames = append(nodeNames, node.Name)
@@ -153,11 +154,11 @@ func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet ma
 nodesToPodCount := make(map[string]int)
 for _, pod := range pods {
 if controller.IsPodActive(&pod) {
-framework.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
+e2elog.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
 nodesToPodCount[pod.Spec.NodeName]++
 }
 }
-framework.Logf("nodesToPodCount: %v", nodesToPodCount)
+e2elog.Logf("nodesToPodCount: %v", nodesToPodCount)

 // Ensure that exactly 1 pod is running on all nodes in nodeNames.
 for _, nodeName := range nodeNames {
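The hunk above cuts off just before the per-node verification. For context, a check of that shape typically looks like the sketch below: every node in nodeNames must carry exactly one active DaemonSet pod, and no other node may run one. This is a paraphrase of the pattern, not the exact upstream helper.

// checkExactlyOnePodPerNode is an illustrative sketch (not the upstream code)
// of the verification that follows the logged nodesToPodCount map.
func checkExactlyOnePodPerNode(nodesToPodCount map[string]int, nodeNames []string) bool {
	for _, nodeName := range nodeNames {
		if nodesToPodCount[nodeName] != 1 {
			// A listed node is missing its pod or runs more than one copy.
			return false
		}
	}
	// Any extra entry in the map would mean a pod landed on an unexpected node.
	return len(nodesToPodCount) == len(nodeNames)
}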
@@ -32,6 +32,7 @@ import (
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )

@@ -90,13 +91,13 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
 return false, nil
 }
 if _, err := t.listUsers(); err != nil {
-framework.Logf("Service endpoint is up but isn't responding")
+e2elog.Logf("Service endpoint is up but isn't responding")
 return false, nil
 }
 return true, nil
 })
 framework.ExpectNoError(err)
-framework.Logf("Service endpoint is up")
+e2elog.Logf("Service endpoint is up")

 ginkgo.By("Adding 2 dummy users")
 gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
@@ -175,7 +176,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 go wait.Until(func() {
 writeAttempts++
 if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
-framework.Logf("Unable to add user: %v", err)
+e2elog.Logf("Unable to add user: %v", err)
 mu.Lock()
 errors[err.Error()]++
 mu.Unlock()
@@ -187,7 +188,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 wait.Until(func() {
 users, err := t.listUsers()
 if err != nil {
-framework.Logf("Could not retrieve users: %v", err)
+e2elog.Logf("Could not retrieve users: %v", err)
 failures++
 mu.Lock()
 errors[err.Error()]++
@@ -197,14 +198,14 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 success++
 lastUserCount = len(users)
 }, 10*time.Millisecond, done)
-framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
+e2elog.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)

 gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
 ratio := float64(success) / float64(success+failures)
-framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
+e2elog.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
 ratio = float64(t.successfulWrites) / float64(writeAttempts)
-framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
+e2elog.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
-framework.Logf("Errors: %v", errors)
+e2elog.Logf("Errors: %v", errors)
 // TODO(maisem): tweak this value once we have a few test runs.
 gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
 }
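The Cassandra, etcd, and MySQL upgrade tests touched here all follow the same pattern: keep writing and reading in background loops while the upgrade runs, stop when the done channel closes, then assert an availability ratio. The wait.Until helper from apimachinery drives those loops, invoking the closure once per period until the stop channel is closed. A minimal sketch under those assumptions (listUsers here is a placeholder, not the upstream API):

// Sketch of the read-during-upgrade loop used by these tests.
package sketch

import (
	"log"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func pollDuringUpgrade(listUsers func() ([]string, error), done <-chan struct{}) (success, failures int) {
	// wait.Until re-invokes the closure every 10ms until done is closed,
	// which is how availability is sampled across the upgrade window.
	wait.Until(func() {
		if _, err := listUsers(); err != nil {
			log.Printf("Could not retrieve users: %v", err)
			failures++
			return
		}
		success++
	}, 10*time.Millisecond, done)
	return success, failures
}

After the loop returns, the ratio success/(success+failures) is what the tests compare against the 0.75 threshold seen above.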
@@ -32,6 +32,7 @@ import (
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )

@@ -85,13 +86,13 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
 return false, nil
 }
 if _, err := t.listUsers(); err != nil {
-framework.Logf("Service endpoint is up but isn't responding")
+e2elog.Logf("Service endpoint is up but isn't responding")
 return false, nil
 }
 return true, nil
 })
 framework.ExpectNoError(err)
-framework.Logf("Service endpoint is up")
+e2elog.Logf("Service endpoint is up")

 ginkgo.By("Adding 2 dummy users")
 gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
@@ -163,7 +164,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
 go wait.Until(func() {
 writeAttempts++
 if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
-framework.Logf("Unable to add user: %v", err)
+e2elog.Logf("Unable to add user: %v", err)
 mu.Lock()
 errors[err.Error()]++
 mu.Unlock()
@@ -175,7 +176,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
 wait.Until(func() {
 users, err := t.listUsers()
 if err != nil {
-framework.Logf("Could not retrieve users: %v", err)
+e2elog.Logf("Could not retrieve users: %v", err)
 failures++
 mu.Lock()
 errors[err.Error()]++
@@ -185,14 +186,14 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
 success++
 lastUserCount = len(users)
 }, 10*time.Millisecond, done)
-framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
+e2elog.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)

 gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
 ratio := float64(success) / float64(success+failures)
-framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
+e2elog.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
 ratio = float64(t.successfulWrites) / float64(writeAttempts)
-framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
+e2elog.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
-framework.Logf("Errors: %v", errors)
+e2elog.Logf("Errors: %v", errors)
 // TODO(maisem): tweak this value once we have a few test runs.
 gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
 }
@@ -21,12 +21,13 @@ import (
 "time"

 apps "k8s.io/api/apps/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -104,12 +105,12 @@ func (t *KubeProxyDowngradeTest) Teardown(f *framework.Framework) {
 }

 func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
-framework.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)
+e2elog.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)

 condition := func() (bool, error) {
 pods, err := getKubeProxyStaticPods(c)
 if err != nil {
-framework.Logf("Failed to get kube-proxy static pods: %v", err)
+e2elog.Logf("Failed to get kube-proxy static pods: %v", err)
 return false, nil
 }

@@ -121,7 +122,7 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
 }
 }
 if numberkubeProxyPods != numberSchedulableNodes {
-framework.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
+e2elog.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
 return false, nil
 }
 return true, nil
@@ -134,17 +135,17 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
 }

 func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
-framework.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)
+e2elog.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)

 condition := func() (bool, error) {
 pods, err := getKubeProxyStaticPods(c)
 if err != nil {
-framework.Logf("Failed to get kube-proxy static pods: %v", err)
+e2elog.Logf("Failed to get kube-proxy static pods: %v", err)
 return false, nil
 }

 if len(pods.Items) != 0 {
-framework.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
+e2elog.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
 return false, nil
 }
 return true, nil
@@ -157,24 +158,24 @@ func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
 }

 func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
-framework.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)
+e2elog.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)

 condition := func() (bool, error) {
 daemonSets, err := getKubeProxyDaemonSet(c)
 if err != nil {
-framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
+e2elog.Logf("Failed to get kube-proxy DaemonSet: %v", err)
 return false, nil
 }

 if len(daemonSets.Items) != 1 {
-framework.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
+e2elog.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
 return false, nil
 }

 numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
 numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
 if numberkubeProxyPods != numberSchedulableNodes {
-framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
+e2elog.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
 return false, nil
 }
 return true, nil
@@ -187,17 +188,17 @@ func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
 }

 func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error {
-framework.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)
+e2elog.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)

 condition := func() (bool, error) {
 daemonSets, err := getKubeProxyDaemonSet(c)
 if err != nil {
-framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
+e2elog.Logf("Failed to get kube-proxy DaemonSet: %v", err)
 return false, nil
 }

 if len(daemonSets.Items) != 0 {
-framework.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
+e2elog.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
 return false, nil
 }
 return true, nil
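Each of the wait helpers above builds a condition of type func() (bool, error) and, outside the visible hunks, hands it to a poller from k8s.io/apimachinery/pkg/util/wait. A minimal sketch of that pattern follows, assuming the helpers poll with wait.PollImmediate against the file's timeout; the interval, getPods helper, and counts here are placeholders, not the upstream code.

// Sketch of a condition-function wait in the style of the helpers above.
package sketch

import (
	"log"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForPods(getPods func() (int, error), want int, timeout time.Duration) error {
	condition := func() (bool, error) {
		got, err := getPods()
		if err != nil {
			// Log and retry on transient errors rather than aborting the wait.
			log.Printf("Failed to get pods: %v", err)
			return false, nil
		}
		if got != want {
			log.Printf("Expect %v pods running, got %v", want, got)
			return false, nil
		}
		return true, nil
	}
	// PollImmediate checks the condition right away, then every 5 seconds,
	// until it returns true, returns an error, or the timeout expires.
	return wait.PollImmediate(5*time.Second, timeout, condition)
}

Returning (false, nil) keeps the poller retrying; only a non-nil error or the timeout ends the wait early, which is why the helpers above log failures instead of returning them.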
@@ -32,6 +32,7 @@ import (
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )

@@ -100,13 +101,13 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
 return false, nil
 }
 if _, err := t.countNames(); err != nil {
-framework.Logf("Service endpoint is up but isn't responding")
+e2elog.Logf("Service endpoint is up but isn't responding")
 return false, nil
 }
 return true, nil
 })
 framework.ExpectNoError(err)
-framework.Logf("Service endpoint is up")
+e2elog.Logf("Service endpoint is up")

 ginkgo.By("Adding 2 names to the database")
 gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
@@ -126,7 +127,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 go wait.Until(func() {
 _, err := t.countNames()
 if err != nil {
-framework.Logf("Error while trying to read data: %v", err)
+e2elog.Logf("Error while trying to read data: %v", err)
 readFailure++
 } else {
 readSuccess++
@@ -136,7 +137,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 wait.Until(func() {
 err := t.addName(strconv.Itoa(t.nextWrite))
 if err != nil {
-framework.Logf("Error while trying to write data: %v", err)
+e2elog.Logf("Error while trying to write data: %v", err)
 writeFailure++
 } else {
 writeSuccess++
@@ -144,10 +145,10 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 }, framework.Poll, done)

 t.successfulWrites = writeSuccess
-framework.Logf("Successful reads: %d", readSuccess)
+e2elog.Logf("Successful reads: %d", readSuccess)
-framework.Logf("Successful writes: %d", writeSuccess)
+e2elog.Logf("Successful writes: %d", writeSuccess)
-framework.Logf("Failed reads: %d", readFailure)
+e2elog.Logf("Failed reads: %d", readFailure)
-framework.Logf("Failed writes: %d", writeFailure)
+e2elog.Logf("Failed writes: %d", writeFailure)

 // TODO: Not sure what the ratio defining a successful test run should be. At time of writing the
 // test, failures only seem to happen when a race condition occurs (read/write starts, doesn't
@@ -25,6 +25,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/gpu"
 jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/scheduling"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -89,7 +90,7 @@ func (t *NvidiaGPUUpgradeTest) startJob(f *framework.Framework) {
 ns := f.Namespace.Name
 _, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
 framework.ExpectNoError(err)
-framework.Logf("Created job %v", testJob)
+e2elog.Logf("Created job %v", testJob)
 ginkgo.By("Waiting for gpu job pod start")
 err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1)
 framework.ExpectNoError(err)
@@ -105,11 +106,11 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
 pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
 framework.ExpectNoError(err)
 createdPod := pods.Items[0].Name
-framework.Logf("Created pod %v", createdPod)
+e2elog.Logf("Created pod %v", createdPod)
 f.PodClient().WaitForSuccess(createdPod, 5*time.Minute)
 logs, err := framework.GetPodLogs(f.ClientSet, ns, createdPod, "vector-addition")
 framework.ExpectNoError(err, "Should be able to get pod logs")
-framework.Logf("Got pod logs: %v", logs)
+e2elog.Logf("Got pod logs: %v", logs)
 regex := regexp.MustCompile("PASSED")
 gomega.Expect(regex.MatchString(logs)).To(gomega.BeTrue())
 }