e2e: use Ginkgo context

All code must use the context from Ginkgo when making API calls or polling for a
change; otherwise the code will not return promptly when the test gets
aborted.
Author: Patrick Ohly
Date:   2022-12-12 10:11:10 +01:00
Commit: 2f6c4f5eab (parent bf1d1dfd0f)
418 changed files with 11489 additions and 11369 deletions
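
For readers unfamiliar with the pattern this commit rolls out, a minimal sketch of a context-aware spec follows. It is illustrative only (the pod name and namespace handling are hypothetical, not taken from this commit): Ginkgo hands the spec a context that is canceled when the test is interrupted, and that context is passed to every client-go call and poll instead of context.TODO().

package e2e

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("context plumbing sketch", func() {
	f := framework.NewDefaultFramework("ctx-sketch")

	ginkgo.It("uses the spec context for API calls and polling", func(ctx context.Context) {
		// The context comes from Ginkgo and is canceled when the spec is
		// aborted, so this Get returns promptly instead of hanging.
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, "example-pod", metav1.GetOptions{})
		framework.ExpectNoError(err)

		// Polls take the same context; an aborted spec stops the polling.
		gomega.Eventually(ctx, func(ctx context.Context) (string, error) {
			p, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
			if err != nil {
				return "", err
			}
			return string(p.Status.Phase), nil
		}).WithTimeout(2*time.Minute).WithPolling(2*time.Second).Should(gomega.Equal("Running"))
	})
})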

View File

@@ -33,13 +33,13 @@ var _ = SIGDescribe("Networking", func() {
ginkgo.Describe("Granular Checks: Pods", func() {
checkPodToPodConnectivity := func(config *e2enetwork.NetworkingTestConfig, protocol string, port int) {
checkPodToPodConnectivity := func(ctx context.Context, config *e2enetwork.NetworkingTestConfig, protocol string, port int) {
// breadth first poll to quickly estimate failure.
failedPodsByHost := map[string][]*v1.Pod{}
// First time, we'll quickly try all pods, breadth first.
for _, endpointPod := range config.EndpointPods {
framework.Logf("Breadth first check of %v on host %v...", endpointPod.Status.PodIP, endpointPod.Status.HostIP)
if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, 1, 0, sets.NewString(endpointPod.Name)); err != nil {
if err := config.DialFromTestContainer(ctx, protocol, endpointPod.Status.PodIP, port, 1, 0, sets.NewString(endpointPod.Name)); err != nil {
if _, ok := failedPodsByHost[endpointPod.Status.HostIP]; !ok {
failedPodsByHost[endpointPod.Status.HostIP] = []*v1.Pod{}
}
@@ -54,7 +54,7 @@ var _ = SIGDescribe("Networking", func() {
framework.Logf("Doublechecking %v pods in host %v which weren't seen the first time.", len(failedPods), host)
for _, endpointPod := range failedPods {
framework.Logf("Now attempting to probe pod [[[ %v ]]]", endpointPod.Status.PodIP)
if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil {
if err := config.DialFromTestContainer(ctx, protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil {
errors = append(errors, err)
} else {
framework.Logf("Was able to reach %v on %v ", endpointPod.Status.PodIP, endpointPod.Status.HostIP)
@@ -82,8 +82,8 @@ var _ = SIGDescribe("Networking", func() {
The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
checkPodToPodConnectivity(config, "http", e2enetwork.EndpointHTTPPort)
config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, false)
checkPodToPodConnectivity(ctx, config, "http", e2enetwork.EndpointHTTPPort)
})
/*
@@ -93,8 +93,8 @@ var _ = SIGDescribe("Networking", func() {
The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
checkPodToPodConnectivity(config, "udp", e2enetwork.EndpointUDPPort)
config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, false)
checkPodToPodConnectivity(ctx, config, "udp", e2enetwork.EndpointUDPPort)
})
/*
@@ -105,9 +105,9 @@ var _ = SIGDescribe("Networking", func() {
This test is marked LinuxOnly it breaks when using Overlay networking with Windows.
*/
framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, true)
for _, endpointPod := range config.EndpointPods {
err := config.DialFromNode("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
err := config.DialFromNode(ctx, "http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
if err != nil {
framework.Failf("Error dialing HTTP node to pod %v", err)
}
@@ -122,9 +122,9 @@ var _ = SIGDescribe("Networking", func() {
This test is marked LinuxOnly it breaks when using Overlay networking with Windows.
*/
framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, true)
for _, endpointPod := range config.EndpointPods {
err := config.DialFromNode("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
err := config.DialFromNode(ctx, "udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
if err != nil {
framework.Failf("Error dialing UDP from node to pod: %v", err)
}
@@ -132,15 +132,15 @@ var _ = SIGDescribe("Networking", func() {
})
ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) {
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
checkPodToPodConnectivity(config, "sctp", e2enetwork.EndpointSCTPPort)
config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
checkPodToPodConnectivity(ctx, config, "sctp", e2enetwork.EndpointSCTPPort)
})
ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) {
ginkgo.Skip("Skipping SCTP node to pod test until DialFromNode supports SCTP #96482")
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
for _, endpointPod := range config.EndpointPods {
err := config.DialFromNode("sctp", endpointPod.Status.PodIP, e2enetwork.EndpointSCTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
err := config.DialFromNode(ctx, "sctp", endpointPod.Status.PodIP, e2enetwork.EndpointSCTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
if err != nil {
framework.Failf("Error dialing SCTP from node to pod: %v", err)
}

View File

@@ -47,7 +47,7 @@ var _ = SIGDescribe("ConfigMap", func() {
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -80,7 +80,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
"CONFIG_DATA_1=value-1",
})
})
@@ -95,7 +95,7 @@ var _ = SIGDescribe("ConfigMap", func() {
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -124,7 +124,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
"data-1=value-1", "data-2=value-2", "data-3=value-3",
"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
})
@@ -136,7 +136,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Attempt to create a ConfigMap with an empty key. The creation MUST fail.
*/
framework.ConformanceIt("should fail to create ConfigMap with empty key", func(ctx context.Context) {
configMap, err := newConfigMapWithEmptyKey(f)
configMap, err := newConfigMapWithEmptyKey(ctx, f)
framework.ExpectError(err, "created configMap %q with empty key in namespace %q", configMap.Name, f.Namespace.Name)
})
@@ -144,17 +144,17 @@ var _ = SIGDescribe("ConfigMap", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create ConfigMap")
configMap.Data = map[string]string{
"data": "value",
}
ginkgo.By(fmt.Sprintf("Updating configMap %v/%v", f.Namespace.Name, configMap.Name))
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "failed to update ConfigMap")
configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get ConfigMap")
ginkgo.By(fmt.Sprintf("Verifying update of ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
framework.ExpectEqual(configMapFromUpdate.Data, configMap.Data)
@@ -183,11 +183,11 @@ var _ = SIGDescribe("ConfigMap", func() {
}
ginkgo.By("creating a ConfigMap")
_, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(context.TODO(), &testConfigMap, metav1.CreateOptions{})
_, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(ctx, &testConfigMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create ConfigMap")
ginkgo.By("fetching the ConfigMap")
configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(context.TODO(), testConfigMapName, metav1.GetOptions{})
configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(ctx, testConfigMapName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get ConfigMap")
framework.ExpectEqual(configMap.Data["valueName"], testConfigMap.Data["valueName"])
framework.ExpectEqual(configMap.Labels["test-configmap-static"], testConfigMap.Labels["test-configmap-static"])
@@ -205,11 +205,11 @@ var _ = SIGDescribe("ConfigMap", func() {
framework.ExpectNoError(err, "failed to marshal patch data")
ginkgo.By("patching the ConfigMap")
_, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(context.TODO(), testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload), metav1.PatchOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(ctx, testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch ConfigMap")
ginkgo.By("listing all ConfigMaps in all namespaces with a label selector")
configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(context.TODO(), metav1.ListOptions{
configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(ctx, metav1.ListOptions{
LabelSelector: "test-configmap=patched",
})
framework.ExpectNoError(err, "failed to list ConfigMaps with LabelSelector")
@@ -229,13 +229,13 @@ var _ = SIGDescribe("ConfigMap", func() {
}
ginkgo.By("deleting the ConfigMap by collection with a label selector")
err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: "test-configmap-static=true",
})
framework.ExpectNoError(err, "failed to delete ConfigMap collection with LabelSelector")
ginkgo.By("listing all ConfigMaps in test namespace")
configMapList, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).List(context.TODO(), metav1.ListOptions{
configMapList, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).List(ctx, metav1.ListOptions{
LabelSelector: "test-configmap-static=true",
})
framework.ExpectNoError(err, "failed to list ConfigMap by LabelSelector")
@@ -257,7 +257,7 @@ func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
}
}
func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
func newConfigMapWithEmptyKey(ctx context.Context, f *framework.Framework) (*v1.ConfigMap, error) {
name := "configmap-test-emptyKey-" + string(uuid.NewUUID())
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -270,5 +270,5 @@ func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
}
ginkgo.By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name))
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
}
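
The same substitution applies to helpers: anything that talks to the API server now takes the caller's context as its first parameter and forwards it to client-go. A hypothetical helper in that style (not part of this commit) could look like:

package e2e

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createTestConfigMap mirrors the pattern above: the context flows from the
// spec into the helper and on into the client-go call, so an aborted test
// cancels the request instead of letting a context.TODO() call run on.
func createTestConfigMap(ctx context.Context, c clientset.Interface, namespace, name string) (*v1.ConfigMap, error) {
	cm := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data:       map[string]string{"data-1": "value-1"},
	}
	return c.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{})
}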

View File

@@ -50,7 +50,7 @@ type ConformanceContainer struct {
}
// Create creates the defined conformance container
func (cc *ConformanceContainer) Create() {
func (cc *ConformanceContainer) Create(ctx context.Context) {
cc.podName = cc.Container.Name + string(uuid.NewUUID())
imagePullSecrets := []v1.LocalObjectReference{}
for _, s := range cc.ImagePullSecrets {
@@ -70,17 +70,17 @@ func (cc *ConformanceContainer) Create() {
ImagePullSecrets: imagePullSecrets,
},
}
cc.PodClient.Create(pod)
cc.PodClient.Create(ctx, pod)
}
// Delete deletes the defined conformance container
func (cc *ConformanceContainer) Delete() error {
return cc.PodClient.Delete(context.TODO(), cc.podName, *metav1.NewDeleteOptions(0))
func (cc *ConformanceContainer) Delete(ctx context.Context) error {
return cc.PodClient.Delete(ctx, cc.podName, *metav1.NewDeleteOptions(0))
}
// IsReady returns whether this container is ready and error if any
func (cc *ConformanceContainer) IsReady() (bool, error) {
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
func (cc *ConformanceContainer) IsReady(ctx context.Context) (bool, error) {
pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -88,8 +88,8 @@ func (cc *ConformanceContainer) IsReady() (bool, error) {
}
// GetPhase returns the phase of the pod lifecycle and error if any
func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
func (cc *ConformanceContainer) GetPhase(ctx context.Context) (v1.PodPhase, error) {
pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
if err != nil {
// it doesn't matter what phase to return as error would not be nil
return v1.PodSucceeded, err
@@ -98,8 +98,8 @@ func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
}
// GetStatus returns the details of the current status of this container and error if any
func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
func (cc *ConformanceContainer) GetStatus(ctx context.Context) (v1.ContainerStatus, error) {
pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
if err != nil {
return v1.ContainerStatus{}, err
}
@@ -111,8 +111,8 @@ func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
}
// Present returns whether this pod is present and error if any
func (cc *ConformanceContainer) Present() (bool, error) {
_, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
func (cc *ConformanceContainer) Present(ctx context.Context) (bool, error) {
_, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
if err == nil {
return true, nil
}

View File

@@ -71,10 +71,10 @@ var _ = SIGDescribe("Probing container", func() {
*/
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func(ctx context.Context) {
containerName := "test-webserver"
p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80))
e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)
p := podClient.Create(ctx, testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80))
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout))
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
@@ -106,16 +106,16 @@ var _ = SIGDescribe("Probing container", func() {
then the Pod MUST never be ready, never be running and restart count MUST be zero.
*/
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func(ctx context.Context) {
p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
gomega.Consistently(func() (bool, error) {
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
p := podClient.Create(ctx, testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
gomega.Consistently(ctx, func() (bool, error) {
p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return podutil.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready")
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, _ := testutils.PodRunningReady(p)
@@ -141,7 +141,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := busyBoxPodSpec(nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
})
/*
@@ -158,7 +158,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := busyBoxPodSpec(nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 0, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
})
/*
@@ -173,7 +173,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
})
/*
@@ -188,7 +188,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
RunLivenessTest(f, pod, 0, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
})
/*
@@ -204,7 +204,7 @@ var _ = SIGDescribe("Probing container", func() {
}
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
// ~2 minutes backoff timeouts + 4 minutes defaultObservationTimeout + 2 minutes for each pod restart
RunLivenessTest(f, pod, 5, 2*time.Minute+defaultObservationTimeout+4*2*time.Minute)
RunLivenessTest(ctx, f, pod, 5, 2*time.Minute+defaultObservationTimeout+4*2*time.Minute)
})
/*
@@ -220,7 +220,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers.
}
pod := testWebServerPodSpec(nil, livenessProbe, "test-webserver", 80)
RunLivenessTest(f, pod, 0, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
})
/*
@@ -237,7 +237,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := busyBoxPodSpec(nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
})
/*
@@ -254,7 +254,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := busyBoxPodSpec(readinessProbe, nil, cmd)
runReadinessFailTest(f, pod, time.Minute)
runReadinessFailTest(ctx, f, pod, time.Minute)
})
/*
@@ -271,7 +271,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := busyBoxPodSpec(nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
})
/*
@@ -286,7 +286,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
})
/*
@@ -301,7 +301,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
RunLivenessTest(f, pod, 0, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
// Expect an event of type "ProbeWarning".
expectedEvent := fields.Set{
"involvedObject.kind": "Pod",
@@ -310,7 +310,7 @@ var _ = SIGDescribe("Probing container", func() {
"reason": events.ContainerProbeWarning,
}.AsSelector().String()
framework.ExpectNoError(e2eevents.WaitTimeoutForEvent(
f.ClientSet, f.Namespace.Name, expectedEvent, "Probe terminated redirects, Response body: <a href=\"http://0.0.0.0/\">Found</a>.", framework.PodEventTimeout))
ctx, f.ClientSet, f.Namespace.Name, expectedEvent, "Probe terminated redirects, Response body: <a href=\"http://0.0.0.0/\">Found</a>.", framework.PodEventTimeout))
})
/*
@@ -339,7 +339,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 3,
}
pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
})
/*
@@ -368,7 +368,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 60,
}
pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 0, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
})
/*
@@ -397,7 +397,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 60,
}
pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
})
/*
@@ -421,22 +421,22 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 120,
PeriodSeconds: 5,
}
p := podClient.Create(startupPodSpec(startupProbe, readinessProbe, nil, cmd))
p := podClient.Create(ctx, startupPodSpec(startupProbe, readinessProbe, nil, cmd))
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodContainerStarted(f.ClientSet, f.Namespace.Name, p.Name, 0, framework.PodStartTimeout)
err = e2epod.WaitForPodContainerStarted(ctx, f.ClientSet, f.Namespace.Name, p.Name, 0, framework.PodStartTimeout)
framework.ExpectNoError(err)
startedTime := time.Now()
// We assume the pod became ready when the container became ready. This
// is true for a single container pod.
err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)
err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)
framework.ExpectNoError(err)
readyTime := time.Now()
p, err = podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
p, err = podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
@@ -480,7 +480,7 @@ var _ = SIGDescribe("Probing container", func() {
}
// 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500
RunLivenessTest(f, pod, 1, time.Second*30)
RunLivenessTest(ctx, f, pod, 1, time.Second*30)
})
/*
@@ -513,7 +513,7 @@ var _ = SIGDescribe("Probing container", func() {
}
// 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500
RunLivenessTest(f, pod, 1, time.Second*30)
RunLivenessTest(ctx, f, pod, 1, time.Second*30)
})
/*
@@ -535,7 +535,7 @@ var _ = SIGDescribe("Probing container", func() {
}
pod := gRPCServerPodSpec(nil, livenessProbe, "etcd")
RunLivenessTest(f, pod, 0, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
})
/*
@@ -556,7 +556,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1,
}
pod := gRPCServerPodSpec(nil, livenessProbe, "etcd")
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
})
ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func(ctx context.Context) {
@@ -580,7 +580,7 @@ done
`
// Create Pod
podClient.Create(&v1.Pod{
podClient.Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
@@ -608,12 +608,14 @@ done
})
// verify pods are running and ready
err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
framework.ExpectNoError(err)
// Shutdown pod. Readiness should change to false
podClient.Delete(context.Background(), podName, metav1.DeleteOptions{})
err = waitForPodStatusByInformer(f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) {
err = podClient.Delete(ctx, podName, metav1.DeleteOptions{})
framework.ExpectNoError(err)
err = waitForPodStatusByInformer(ctx, f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) {
if !podutil.IsPodReady(pod) {
return true, nil
}
@@ -646,7 +648,7 @@ done
`
// Create Pod
podClient.Create(&v1.Pod{
podClient.Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
@@ -688,14 +690,15 @@ done
})
// verify pods are running and ready
err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
framework.ExpectNoError(err)
// Shutdown pod. Readiness should change to false
podClient.Delete(context.Background(), podName, metav1.DeleteOptions{})
err = podClient.Delete(ctx, podName, metav1.DeleteOptions{})
framework.ExpectNoError(err)
// Wait for pod to go unready
err = waitForPodStatusByInformer(f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) {
err = waitForPodStatusByInformer(ctx, f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) {
if !podutil.IsPodReady(pod) {
return true, nil
}
@@ -706,8 +709,8 @@ done
// Verify there are zero liveness failures since they are turned off
// during pod termination
gomega.Consistently(func() (bool, error) {
items, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.Background(), metav1.ListOptions{})
gomega.Consistently(ctx, func(ctx context.Context) (bool, error) {
items, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
for _, event := range items.Items {
// Search only for the pod we are interested in
@@ -724,37 +727,41 @@ done
})
// waitForPodStatusByInformer waits pod status change by informer
func waitForPodStatusByInformer(c clientset.Interface, podNamespace, podName string, timeout time.Duration, condition func(pod *v1.Pod) (bool, error)) error {
func waitForPodStatusByInformer(ctx context.Context, c clientset.Interface, podNamespace, podName string, timeout time.Duration, condition func(pod *v1.Pod) (bool, error)) error {
// TODO (pohly): rewrite with gomega.Eventually to get intermediate progress reports.
stopCh := make(chan struct{})
checkPodStatusFunc := func(pod *v1.Pod) {
if ok, _ := condition(pod); ok {
close(stopCh)
}
}
controller := newInformerWatchPod(c, podNamespace, podName, checkPodStatusFunc)
controller := newInformerWatchPod(ctx, c, podNamespace, podName, checkPodStatusFunc)
go controller.Run(stopCh)
after := time.After(timeout)
select {
case <-stopCh:
return nil
case <-ctx.Done():
close(stopCh)
return fmt.Errorf("timeout to wait pod status ready")
case <-after:
defer close(stopCh)
close(stopCh)
return fmt.Errorf("timeout to wait pod status ready")
}
}
// newInformerWatchPod creates a informer for given pod
func newInformerWatchPod(c clientset.Interface, podNamespace, podName string, checkPodStatusFunc func(p *v1.Pod)) cache.Controller {
func newInformerWatchPod(ctx context.Context, c clientset.Interface, podNamespace, podName string, checkPodStatusFunc func(p *v1.Pod)) cache.Controller {
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
obj, err := c.CoreV1().Pods(podNamespace).List(context.TODO(), options)
obj, err := c.CoreV1().Pods(podNamespace).List(ctx, options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
return c.CoreV1().Pods(podNamespace).Watch(context.TODO(), options)
return c.CoreV1().Pods(podNamespace).Watch(ctx, options)
},
},
&v1.Pod{},
@@ -936,7 +943,7 @@ func (b webserverProbeBuilder) build() *v1.Probe {
}
// RunLivenessTest verifies the number of restarts for pod with given expected number of restarts
func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
func RunLivenessTest(ctx context.Context, f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := e2epod.NewPodClient(f)
ns := f.Namespace.Name
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
@@ -947,18 +954,18 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
})
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
podClient.Create(ctx, pod)
// Wait until the pod is not pending. (Here we need to check for something other than
// 'Pending' other than checking for 'Running', since when failures occur, we go to
// 'Terminated' which can cause indefinite blocking.)
framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, pod.Name),
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
// Check the pod's current state and verify that restartCount is present.
ginkgo.By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
@@ -968,7 +975,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
lastRestartCount := initialRestartCount
observedRestarts := int32(0)
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount {
@@ -996,7 +1003,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
}
}
func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil time.Duration) {
func runReadinessFailTest(ctx context.Context, f *framework.Framework, pod *v1.Pod, notReadyUntil time.Duration) {
podClient := e2epod.NewPodClient(f)
ns := f.Namespace.Name
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
@@ -1007,11 +1014,11 @@ func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil tim
return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
})
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
podClient.Create(ctx, pod)
// Wait until the pod is not pending. (Here we need to check for something other than
// 'Pending', since when failures occur, we go to 'Terminated' which can cause indefinite blocking.)
framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, pod.Name),
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
framework.Logf("Started pod %s in namespace %s", pod.Name, ns)

View File

@@ -41,16 +41,16 @@ var _ = SIGDescribe("Containers", func() {
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func(ctx context.Context) {
pod := entrypointTestPod(f.Namespace.Name)
pod.Spec.Containers[0].Args = nil
pod = e2epod.NewPodClient(f).Create(pod)
err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
pod = e2epod.NewPodClient(f).Create(ctx, pod)
err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Expected pod %q to be running, got error: %v", pod.Name, err)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
}
// The agnhost's image default entrypoint / args are: "/agnhost pause"
// which will print out "Paused".
gomega.Eventually(pollLogs, 3, framework.Poll).Should(gomega.ContainSubstring("Paused"))
gomega.Eventually(ctx, pollLogs, 3, framework.Poll).Should(gomega.ContainSubstring("Paused"))
})
/*
@@ -60,7 +60,7 @@ var _ = SIGDescribe("Containers", func() {
*/
framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func(ctx context.Context) {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
e2epodoutput.TestContainerOutput(f, "override arguments", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "override arguments", pod, 0, []string{
"[/agnhost entrypoint-tester override arguments]",
})
})
@@ -76,7 +76,7 @@ var _ = SIGDescribe("Containers", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
e2epodoutput.TestContainerOutput(f, "override command", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "override command", pod, 0, []string{
"[/agnhost-2 entrypoint-tester]",
})
})
@@ -90,7 +90,7 @@ var _ = SIGDescribe("Containers", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
e2epodoutput.TestContainerOutput(f, "override all", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "override all", pod, 0, []string{
"[/agnhost-2 entrypoint-tester override arguments]",
})
})

View File

@@ -80,7 +80,7 @@ var _ = SIGDescribe("Downward API", func() {
fmt.Sprintf("POD_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6),
}
testDownwardAPI(f, podName, env, expectations)
testDownwardAPI(ctx, f, podName, env, expectations)
})
/*
@@ -106,7 +106,7 @@ var _ = SIGDescribe("Downward API", func() {
fmt.Sprintf("HOST_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6),
}
testDownwardAPI(f, podName, env, expectations)
testDownwardAPI(ctx, f, podName, env, expectations)
})
ginkgo.It("should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]", func(ctx context.Context) {
@@ -155,7 +155,7 @@ var _ = SIGDescribe("Downward API", func() {
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
})
@@ -207,7 +207,7 @@ var _ = SIGDescribe("Downward API", func() {
"MEMORY_REQUEST=33554432",
}
testDownwardAPI(f, podName, env, expectations)
testDownwardAPI(ctx, f, podName, env, expectations)
})
/*
@@ -257,7 +257,7 @@ var _ = SIGDescribe("Downward API", func() {
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
})
/*
@@ -283,7 +283,7 @@ var _ = SIGDescribe("Downward API", func() {
"POD_UID=[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}",
}
testDownwardAPI(f, podName, env, expectations)
testDownwardAPI(ctx, f, podName, env, expectations)
})
})
@@ -344,7 +344,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
})
ginkgo.It("should provide default limits.hugepages-<pagesize> from node allocatable", func(ctx context.Context) {
@@ -381,13 +381,13 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
})
})
})
func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
func testDownwardAPI(ctx context.Context, f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@@ -416,9 +416,9 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations)
func testDownwardAPIUsingPod(ctx context.Context, f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward api env vars", pod, 0, expectations)
}

View File

@@ -45,7 +45,7 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
// Description: Adding an ephemeral container to pod.spec MUST result in the container running.
framework.ConformanceIt("will start an ephemeral container in an existing pod", func(ctx context.Context) {
ginkgo.By("creating a target pod")
pod := podClient.CreateSync(&v1.Pod{
pod := podClient.CreateSync(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-containers-target-pod"},
Spec: v1.PodSpec{
Containers: []v1.Container{
@@ -70,14 +70,14 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
TTY: true,
},
}
err := podClient.AddEphemeralContainerSync(pod, ec, time.Minute)
err := podClient.AddEphemeralContainerSync(ctx, pod, ec, time.Minute)
framework.ExpectNoError(err, "Failed to patch ephemeral containers in pod %q", format.Pod(pod))
ginkgo.By("checking pod container endpoints")
// Can't use anything depending on kubectl here because it's not available in the node test environment
output := e2epod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco")
gomega.Expect(output).To(gomega.ContainSubstring("marco"))
log, err := e2epod.GetPodLogs(f.ClientSet, pod.Namespace, pod.Name, ecName)
log, err := e2epod.GetPodLogs(ctx, f.ClientSet, pod.Namespace, pod.Name, ecName)
framework.ExpectNoError(err, "Failed to get logs for pod %q ephemeral container %q", format.Pod(pod), ecName)
gomega.Expect(log).To(gomega.ContainSubstring("polo"))
})

View File

@@ -60,7 +60,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
}
pod := newPod([]string{"sh", "-c", "env"}, envVars, nil, nil)
e2epodoutput.TestContainerOutput(f, "env composition", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "env composition", pod, 0, []string{
"FOO=foo-value",
"BAR=bar-value",
"FOOBAR=foo-value;;bar-value",
@@ -81,7 +81,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
}
pod := newPod([]string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""}, envVars, nil, nil)
e2epodoutput.TestContainerOutput(f, "substitution in container's command", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "substitution in container's command", pod, 0, []string{
"test-value",
})
})
@@ -101,7 +101,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod := newPod([]string{"sh", "-c"}, envVars, nil, nil)
pod.Spec.Containers[0].Args = []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""}
e2epodoutput.TestContainerOutput(f, "substitution in container's args", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "substitution in container's args", pod, 0, []string{
"test-value",
})
})
@@ -141,7 +141,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
envVars[0].Value = pod.ObjectMeta.Name
pod.Spec.Containers[0].Command = []string{"sh", "-c", "test -d /testcontainer/" + pod.ObjectMeta.Name + ";echo $?"}
e2epodoutput.TestContainerOutput(f, "substitution in volume subpath", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "substitution in volume subpath", pod, 0, []string{
"0",
})
})
@@ -177,7 +177,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod := newPod(nil, envVars, mounts, volumes)
// Pod should fail
testPodFailSubpath(f, pod)
testPodFailSubpath(ctx, f, pod)
})
/*
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod := newPod(nil, envVars, mounts, volumes)
// Pod should fail
testPodFailSubpath(f, pod)
testPodFailSubpath(ctx, f, pod)
})
/*
@@ -265,13 +265,13 @@ var _ = SIGDescribe("Variable Expansion", func() {
ginkgo.By("creating the pod with failed condition")
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectError(err, "while waiting for pod to be running")
ginkgo.By("updating the pod")
podClient.Update(pod.ObjectMeta.Name, func(pod *v1.Pod) {
podClient.Update(ctx, pod.ObjectMeta.Name, func(pod *v1.Pod) {
if pod.ObjectMeta.Annotations == nil {
pod.ObjectMeta.Annotations = make(map[string]string)
}
@@ -279,11 +279,11 @@ var _ = SIGDescribe("Variable Expansion", func() {
})
ginkgo.By("waiting for pod running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("deleting the pod gracefully")
err = e2epod.DeletePodWithWait(f.ClientSet, pod)
err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod")
})
@@ -337,48 +337,48 @@ var _ = SIGDescribe("Variable Expansion", func() {
ginkgo.By("creating the pod")
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
ginkgo.By("waiting for pod running")
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("creating a file in subpath")
cmd := "touch /volume_mount/mypath/foo/test.log"
_, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to write to subpath")
}
ginkgo.By("test for file in mounted path")
cmd = "test -f /subpath_mount/test.log"
_, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to verify file")
}
ginkgo.By("updating the annotation value")
podClient.Update(pod.ObjectMeta.Name, func(pod *v1.Pod) {
podClient.Update(ctx, pod.ObjectMeta.Name, func(pod *v1.Pod) {
pod.ObjectMeta.Annotations["mysubpath"] = "mynewpath"
})
ginkgo.By("waiting for annotated pod running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for annotated pod to be running")
ginkgo.By("deleting the pod gracefully")
err = e2epod.DeletePodWithWait(f.ClientSet, pod)
err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod")
})
})
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
func testPodFailSubpath(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
err := e2epod.WaitForPodContainerToFail(f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout)
err := e2epod.WaitForPodContainerToFail(ctx, f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for the pod container to fail")
}

View File

@@ -210,13 +210,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
},
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
startedPod := podClient.Create(ctx, pod)
fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String()
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return podClient.Watch(context.TODO(), options)
return podClient.Watch(ctx, options)
},
}
var events []watch.Event
@@ -291,13 +291,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
},
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
startedPod := podClient.Create(ctx, pod)
fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String()
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return podClient.Watch(context.TODO(), options)
return podClient.Watch(ctx, options)
},
}
var events []watch.Event
@@ -371,13 +371,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
},
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
startedPod := podClient.Create(ctx, pod)
fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String()
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return podClient.Watch(context.TODO(), options)
return podClient.Watch(ctx, options)
},
}
@@ -496,13 +496,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
},
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
startedPod := podClient.Create(ctx, pod)
fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String()
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return podClient.Watch(context.TODO(), options)
return podClient.Watch(ctx, options)
},
}

View File

@@ -50,7 +50,7 @@ var _ = SIGDescribe("Kubelet", func() {
Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.
*/
framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) {
podClient.CreateSync(&v1.Pod{
podClient.CreateSync(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
@@ -66,9 +66,9 @@ var _ = SIGDescribe("Kubelet", func() {
},
},
})
gomega.Eventually(func() string {
gomega.Eventually(ctx, func() string {
sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream(context.TODO())
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream(ctx)
if err != nil {
return ""
}
@@ -82,9 +82,9 @@ var _ = SIGDescribe("Kubelet", func() {
ginkgo.Context("when scheduling a busybox command that always fails in a pod", func() {
var podName string
ginkgo.BeforeEach(func() {
ginkgo.BeforeEach(func(ctx context.Context) {
podName = "bin-false" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
podClient.Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
@@ -108,8 +108,8 @@ var _ = SIGDescribe("Kubelet", func() {
Description: Create a Pod with terminated state. Pod MUST have only one container. Container MUST be in terminated state and MUST have an terminated reason.
*/
framework.ConformanceIt("should have an terminated reason [NodeConformance]", func(ctx context.Context) {
gomega.Eventually(func() error {
podData, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{})
gomega.Eventually(ctx, func() error {
podData, err := podClient.Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -133,7 +133,7 @@ var _ = SIGDescribe("Kubelet", func() {
Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted.
*/
framework.ConformanceIt("should be possible to delete [NodeConformance]", func(ctx context.Context) {
err := podClient.Delete(context.TODO(), podName, metav1.DeleteOptions{})
err := podClient.Delete(ctx, podName, metav1.DeleteOptions{})
gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
})
@@ -156,12 +156,12 @@ var _ = SIGDescribe("Kubelet", func() {
},
}
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
ginkgo.By("Waiting for pod completion")
err := e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
err := e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(context.TODO())
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(ctx)
framework.ExpectNoError(err)
defer rc.Close()
buf := new(bytes.Buffer)
@@ -183,7 +183,7 @@ var _ = SIGDescribe("Kubelet", func() {
*/
framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
isReadOnly := true
podClient.CreateSync(&v1.Pod{
podClient.CreateSync(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
@@ -202,8 +202,8 @@ var _ = SIGDescribe("Kubelet", func() {
},
},
})
gomega.Eventually(func() string {
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(context.TODO())
gomega.Eventually(ctx, func() string {
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(ctx)
if err != nil {
return ""
}

View File

@@ -63,7 +63,7 @@ var _ = SIGDescribe("KubeletManagedEtcHosts", func() {
*/
framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
ginkgo.By("Setting up the test")
config.setup()
config.setup(ctx)
ginkgo.By("Running the test")
config.verifyEtcHosts()
@@ -83,22 +83,22 @@ func (config *KubeletManagedHostConfig) verifyEtcHosts() {
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2")
}
func (config *KubeletManagedHostConfig) setup() {
func (config *KubeletManagedHostConfig) setup(ctx context.Context) {
ginkgo.By("Creating hostNetwork=false pod")
config.createPodWithoutHostNetwork()
config.createPodWithoutHostNetwork(ctx)
ginkgo.By("Creating hostNetwork=true pod")
config.createPodWithHostNetwork()
config.createPodWithHostNetwork(ctx)
}
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork(ctx context.Context) {
podSpec := config.createPodSpec(etcHostsPodName)
config.pod = e2epod.NewPodClient(config.f).CreateSync(podSpec)
config.pod = e2epod.NewPodClient(config.f).CreateSync(ctx, podSpec)
}
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
func (config *KubeletManagedHostConfig) createPodWithHostNetwork(ctx context.Context) {
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
config.hostNetworkPod = e2epod.NewPodClient(config.f).CreateSync(podSpec)
config.hostNetworkPod = e2epod.NewPodClient(config.f).CreateSync(ctx, podSpec)
}
func assertManagedStatus(

View File

@@ -86,10 +86,10 @@ var _ = SIGDescribe("Lease", func() {
},
}
createdLease, err := leaseClient.Create(context.TODO(), lease, metav1.CreateOptions{})
createdLease, err := leaseClient.Create(ctx, lease, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating Lease failed")
readLease, err := leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
readLease, err := leaseClient.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
if !apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(lease.Spec, readLease.Spec))
@@ -103,10 +103,10 @@ var _ = SIGDescribe("Lease", func() {
LeaseTransitions: pointer.Int32Ptr(1),
}
_, err = leaseClient.Update(context.TODO(), createdLease, metav1.UpdateOptions{})
_, err = leaseClient.Update(ctx, createdLease, metav1.UpdateOptions{})
framework.ExpectNoError(err, "updating Lease failed")
readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
readLease, err = leaseClient.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
if !apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(createdLease.Spec, readLease.Spec))
@@ -123,10 +123,10 @@ var _ = SIGDescribe("Lease", func() {
patchBytes, err := getPatchBytes(readLease, patchedLease)
framework.ExpectNoError(err, "creating patch failed")
_, err = leaseClient.Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
_, err = leaseClient.Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err, "patching Lease failed")
readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
readLease, err = leaseClient.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
if !apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(patchedLease.Spec, readLease.Spec))
@@ -146,25 +146,25 @@ var _ = SIGDescribe("Lease", func() {
LeaseTransitions: pointer.Int32Ptr(0),
},
}
_, err = leaseClient.Create(context.TODO(), lease2, metav1.CreateOptions{})
_, err = leaseClient.Create(ctx, lease2, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating Lease failed")
leases, err := leaseClient.List(context.TODO(), metav1.ListOptions{})
leases, err := leaseClient.List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "couldn't list Leases")
framework.ExpectEqual(len(leases.Items), 2)
selector := labels.Set(map[string]string{"deletecollection": "true"}).AsSelector()
err = leaseClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()})
err = leaseClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()})
framework.ExpectNoError(err, "couldn't delete collection")
leases, err = leaseClient.List(context.TODO(), metav1.ListOptions{})
leases, err = leaseClient.List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "couldn't list Leases")
framework.ExpectEqual(len(leases.Items), 1)
err = leaseClient.Delete(context.TODO(), name, metav1.DeleteOptions{})
err = leaseClient.Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "deleting Lease failed")
_, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
_, err = leaseClient.Get(ctx, name, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
framework.Failf("expected IsNotFound error, got %#v", err)
}
@@ -174,7 +174,7 @@ var _ = SIGDescribe("Lease", func() {
// created for every node by the corresponding Kubelet.
// That said, the objects themselves are small (~300B), so even with 5000
// of them, that gives ~1.5MB, which is acceptable.
_, err = leaseClient.List(context.TODO(), metav1.ListOptions{})
_, err = leaseClient.List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "couldn't list Leases from all namespace")
})
})
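The Lease hunks above replace every context.TODO() with the ctx injected into the spec; the client-go calls themselves (Create, Get, Update, Patch, List, Delete, DeleteCollection) are unchanged apart from that first argument. As an illustration only, the same calls can be exercised against client-go's fake clientset; this sketch assumes that fake package, not the e2e framework:

package main

import (
    "context"
    "fmt"
    "time"

    coordinationv1 "k8s.io/api/coordination/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func main() {
    // In the e2e tests this ctx comes from Ginkgo; a plain timeout context stands in here.
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    client := fake.NewSimpleClientset()
    leases := client.CoordinationV1().Leases("kube-node-lease")

    lease := &coordinationv1.Lease{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}
    if _, err := leases.Create(ctx, lease, metav1.CreateOptions{}); err != nil {
        panic(err)
    }

    // Get, List and Delete take the same ctx; cancelling it aborts requests in flight.
    got, err := leases.Get(ctx, "node-a", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println("read lease:", got.Name)

    if err := leases.Delete(ctx, "node-a", metav1.DeleteOptions{}); err != nil {
        panic(err)
    }
}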

View File

@@ -75,8 +75,8 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
e2epod.NewAgnhostContainer("container-handle-https-request", nil, httpsPorts, httpsArgs...),
)
ginkgo.BeforeEach(func() {
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
ginkgo.BeforeEach(func(ctx context.Context) {
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)
targetNode = node.Name
nodeSelection := e2epod.NodeSelection{}
@@ -85,16 +85,16 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
podClient = e2epod.NewPodClient(f)
ginkgo.By("create the container to handle the HTTPGet hook request.")
newPod := podClient.CreateSync(podHandleHookRequest)
newPod := podClient.CreateSync(ctx, podHandleHookRequest)
targetIP = newPod.Status.PodIP
targetURL = targetIP
if strings.Contains(targetIP, ":") {
targetURL = fmt.Sprintf("[%s]", targetIP)
}
})
testPodWithHook := func(podWithHook *v1.Pod) {
testPodWithHook := func(ctx context.Context, podWithHook *v1.Pod) {
ginkgo.By("create the pod with lifecycle hook")
podClient.CreateSync(podWithHook)
podClient.CreateSync(ctx, podWithHook)
const (
defaultHandler = iota
httpsHandler
@@ -107,13 +107,13 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
handlerContainer = httpsHandler
}
}
gomega.Eventually(func() error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name,
gomega.Eventually(ctx, func(ctx context.Context) error {
return podClient.MatchContainerOutput(ctx, podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name,
`GET /echo\?msg=poststart`)
}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
}
ginkgo.By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
ginkgo.By("check prestop hook")
if podWithHook.Spec.Containers[0].Lifecycle.PreStop.HTTPGet != nil {
@@ -121,8 +121,8 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
handlerContainer = httpsHandler
}
}
gomega.Eventually(func() error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name,
gomega.Eventually(ctx, func(ctx context.Context) error {
return podClient.MatchContainerOutput(ctx, podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name,
`GET /echo\?msg=prestop`)
}, preStopWaitTimeout, podCheckInterval).Should(gomega.BeNil())
}
@@ -142,7 +142,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
}
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle)
testPodWithHook(podWithHook)
testPodWithHook(ctx, podWithHook)
})
/*
Release: v1.9
@@ -158,7 +158,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
},
}
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle)
testPodWithHook(podWithHook)
testPodWithHook(ctx, podWithHook)
})
/*
Release: v1.9
@@ -180,7 +180,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
nodeSelection := e2epod.NodeSelection{}
e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
testPodWithHook(podWithHook)
testPodWithHook(ctx, podWithHook)
})
/*
Release : v1.23
@@ -203,7 +203,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
nodeSelection := e2epod.NodeSelection{}
e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
testPodWithHook(podWithHook)
testPodWithHook(ctx, podWithHook)
})
/*
Release : v1.9
@@ -225,7 +225,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
nodeSelection := e2epod.NodeSelection{}
e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
testPodWithHook(podWithHook)
testPodWithHook(ctx, podWithHook)
})
/*
Release : v1.23
@@ -248,7 +248,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
nodeSelection := e2epod.NodeSelection{}
e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
testPodWithHook(podWithHook)
testPodWithHook(ctx, podWithHook)
})
})
})
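A second recurring pattern in the lifecycle-hook hunks above: gomega.Eventually now receives the context as its first argument and the polled function takes a context of its own, so polling stops the moment the spec is interrupted instead of running out its timeout. A stand-alone sketch of that pattern as a plain Go test, without the e2e framework:

package example

import (
    "context"
    "errors"
    "testing"
    "time"

    "github.com/onsi/gomega"
)

func TestEventuallyWithContext(t *testing.T) {
    g := gomega.NewWithT(t)

    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    start := time.Now()
    // Passing ctx first lets Eventually stop polling when ctx is cancelled;
    // the polled function receives the same ctx for its own API calls.
    g.Eventually(ctx, func(ctx context.Context) error {
        if time.Since(start) < 200*time.Millisecond {
            return errors.New("not ready yet") // keep polling
        }
        return nil // condition met
    }, 5*time.Second, 50*time.Millisecond).Should(gomega.Succeed())
}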

View File

@@ -42,8 +42,8 @@ var _ = SIGDescribe("NodeLease", func() {
f := framework.NewDefaultFramework("node-lease-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.BeforeEach(func() {
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
ginkgo.BeforeEach(func(ctx context.Context) {
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)
nodeName = node.Name
})
@@ -56,8 +56,8 @@ var _ = SIGDescribe("NodeLease", func() {
lease *coordinationv1.Lease
)
ginkgo.By("check that lease for this Kubelet exists in the kube-node-lease namespace")
gomega.Eventually(func() error {
lease, err = leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{})
gomega.Eventually(ctx, func() error {
lease, err = leaseClient.Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -67,8 +67,8 @@ var _ = SIGDescribe("NodeLease", func() {
gomega.Expect(expectLease(lease, nodeName)).To(gomega.BeNil())
ginkgo.By("check that node lease is updated at least once within the lease duration")
gomega.Eventually(func() error {
newLease, err := leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{})
gomega.Eventually(ctx, func() error {
newLease, err := leaseClient.Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -93,8 +93,8 @@ var _ = SIGDescribe("NodeLease", func() {
err error
leaseList *coordinationv1.LeaseList
)
gomega.Eventually(func() error {
leaseList, err = leaseClient.List(context.TODO(), metav1.ListOptions{})
gomega.Eventually(ctx, func() error {
leaseList, err = leaseClient.List(ctx, metav1.ListOptions{})
if err != nil {
return err
}
@@ -113,13 +113,13 @@ var _ = SIGDescribe("NodeLease", func() {
ginkgo.It("the kubelet should report node status infrequently", func(ctx context.Context) {
ginkgo.By("wait until node is ready")
e2enode.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
e2enode.WaitForNodeToBeReady(ctx, f.ClientSet, nodeName, 5*time.Minute)
ginkgo.By("wait until there is node lease")
var err error
var lease *coordinationv1.Lease
gomega.Eventually(func() error {
lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(context.TODO(), nodeName, metav1.GetOptions{})
gomega.Eventually(ctx, func() error {
lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -134,10 +134,10 @@ var _ = SIGDescribe("NodeLease", func() {
// enough time has passed. So for here, keep checking the time diff
// between 2 NodeStatus reports, until it is longer than lease duration
// (the same as nodeMonitorGracePeriod), or it doesn't change for at least leaseDuration
lastHeartbeatTime, lastStatus := getHeartbeatTimeAndStatus(f.ClientSet, nodeName)
lastHeartbeatTime, lastStatus := getHeartbeatTimeAndStatus(ctx, f.ClientSet, nodeName)
lastObserved := time.Now()
err = wait.Poll(time.Second, 5*time.Minute, func() (bool, error) {
currentHeartbeatTime, currentStatus := getHeartbeatTimeAndStatus(f.ClientSet, nodeName)
currentHeartbeatTime, currentStatus := getHeartbeatTimeAndStatus(ctx, f.ClientSet, nodeName)
currentObserved := time.Now()
if currentHeartbeatTime == lastHeartbeatTime {
@@ -178,7 +178,7 @@ var _ = SIGDescribe("NodeLease", func() {
// This check on node status is only meaningful when this e2e test is
// running as cluster e2e test, because node e2e test does not create and
// run controller manager, i.e., no node lifecycle controller.
node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue)
@@ -186,8 +186,8 @@ var _ = SIGDescribe("NodeLease", func() {
})
})
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) {
node, err := clientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
func getHeartbeatTimeAndStatus(ctx context.Context, clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) {
node, err := clientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue)
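The NodeLease hunks above also show the Ginkgo side of the change: BeforeEach and It callbacks now take a context.Context parameter, and Ginkgo v2 supplies a per-spec context that is cancelled when the spec is interrupted or times out. A minimal suite demonstrating just that wiring; lookupNode is a hypothetical stand-in, not the framework's e2enode helper:

package example_test

import (
    "context"
    "testing"

    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
)

func TestCtxWiring(t *testing.T) {
    gomega.RegisterFailHandler(ginkgo.Fail)
    ginkgo.RunSpecs(t, "ctx wiring")
}

// lookupNode is a stand-in for an API call that must honour cancellation.
func lookupNode(ctx context.Context) (string, error) {
    if err := ctx.Err(); err != nil {
        return "", err
    }
    return "node-a", nil
}

var _ = ginkgo.Describe("context-aware specs", func() {
    var nodeName string

    // Ginkgo injects a per-spec context; it is cancelled on interrupt or spec timeout.
    ginkgo.BeforeEach(func(ctx context.Context) {
        var err error
        nodeName, err = lookupNode(ctx)
        gomega.Expect(err).NotTo(gomega.HaveOccurred())
    })

    ginkgo.It("passes the same context on to API calls", func(ctx context.Context) {
        gomega.Expect(nodeName).To(gomega.Equal("node-a"))
    })
})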

View File

@@ -37,7 +37,7 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.Context("Kubelet", func() {
ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func(ctx context.Context) {
linuxNode, err := findLinuxNode(f)
linuxNode, err := findLinuxNode(ctx, f)
framework.ExpectNoError(err)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -57,18 +57,18 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() {
NodeName: linuxNode.Name, // Set the node to a node which doesn't support the pod's OS
},
}
pod = e2epod.NewPodClient(f).Create(pod)
pod = e2epod.NewPodClient(f).Create(ctx, pod)
// Check the pod is still not running
err = e2epod.WaitForPodFailedReason(f.ClientSet, pod, "PodOSNotSupported", f.Timeouts.PodStartShort)
err = e2epod.WaitForPodFailedReason(ctx, f.ClientSet, pod, "PodOSNotSupported", f.Timeouts.PodStartShort)
framework.ExpectNoError(err)
})
})
})
// findLinuxNode finds a Linux node that is Ready and Schedulable
func findLinuxNode(f *framework.Framework) (v1.Node, error) {
func findLinuxNode(ctx context.Context, f *framework.Framework) (v1.Node, error) {
selector := labels.Set{"kubernetes.io/os": "linux"}.AsSelector()
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return v1.Node{}, err

View File

@@ -69,15 +69,15 @@ const (
)
// testHostIP tests that a pod gets a host IP
func testHostIP(podClient *e2epod.PodClient, pod *v1.Pod) {
func testHostIP(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod) {
ginkgo.By("creating pod")
podClient.CreateSync(pod)
podClient.CreateSync(ctx, pod)
// Try to make sure we get a hostIP for each pod.
hostIPTimeout := 2 * time.Minute
t := time.Now()
for {
p, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
p, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" {
framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
@@ -92,40 +92,40 @@ func testHostIP(podClient *e2epod.PodClient, pod *v1.Pod) {
}
}
func startPodAndGetBackOffs(podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(pod)
func startPodAndGetBackOffs(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(ctx, pod)
time.Sleep(sleepAmount)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
podName := pod.Name
containerName := pod.Spec.Containers[0].Name
ginkgo.By("getting restart delay-0")
_, err := getRestartDelay(podClient, podName, containerName)
_, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
ginkgo.By("getting restart delay-1")
delay1, err := getRestartDelay(podClient, podName, containerName)
delay1, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
ginkgo.By("getting restart delay-2")
delay2, err := getRestartDelay(podClient, podName, containerName)
delay2, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
return delay1, delay2
}
func getRestartDelay(podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) {
func getRestartDelay(ctx context.Context, podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) {
beginTime := time.Now()
var previousRestartCount int32 = -1
var previousFinishedAt time.Time
for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
time.Sleep(time.Second)
pod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{})
pod, err := podClient.Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok {
@@ -171,6 +171,7 @@ func getRestartDelay(podClient *e2epod.PodClient, podName string, containerName
// expectNoErrorWithRetries checks if an error occurs with the given retry count.
func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
// TODO (pohly): replace the entire function with gomega.Eventually.
var err error
for i := 0; i < maxRetries; i++ {
err = fn()
@@ -203,7 +204,7 @@ var _ = SIGDescribe("Pods", func() {
*/
framework.ConformanceIt("should get a host IP [NodeConformance]", func(ctx context.Context) {
name := "pod-hostip-" + string(uuid.NewUUID())
testHostIP(podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
testHostIP(ctx, podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
@@ -248,37 +249,37 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(context.TODO(), options)
pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 0)
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = selector.String()
podList, err := podClient.List(context.TODO(), options)
podList, err := podClient.List(ctx, options)
return podList, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = selector.String()
return podClient.Watch(context.TODO(), options)
return podClient.Watch(ctx, options)
},
}
_, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
defer w.Stop()
ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
ctxUntil, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
defer cancelCtx()
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
if !cache.WaitForCacheSync(ctxUntil.Done(), informer.HasSynced) {
framework.Failf("Timeout while waiting to Pod informer to sync")
}
ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod)
podClient.Create(ctx, pod)
ginkgo.By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(context.TODO(), options)
pods, err = podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
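One wrinkle in the hunk above: deriving a timeout context used to reassign ctx itself, which shadowed the injected spec context for the rest of the test. The commit renames the derived context to ctxUntil and leaves ctx untouched. A short self-contained sketch of that intent; waitForEvent is a hypothetical stand-in for watchtools.Until:

package main

import (
    "context"
    "fmt"
    "time"
)

// waitForEvent stands in for watchtools.Until: it blocks until an event arrives
// or its context expires.
func waitForEvent(ctx context.Context) error {
    select {
    case <-time.After(10 * time.Millisecond): // pretend the watch delivered the event
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func run(ctx context.Context) error {
    // Derive a bounded context for the watch, but keep the original ctx untouched
    // so later calls in the same spec still use the spec-wide context.
    ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
    defer cancel()

    if err := waitForEvent(ctxUntil); err != nil {
        return err
    }

    // ctx (not ctxUntil) remains the right context for the remaining API calls.
    return ctx.Err()
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()
    fmt.Println("run:", run(ctx))
}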
@@ -294,13 +295,13 @@ var _ = SIGDescribe("Pods", func() {
// We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully.
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
// save the running pod
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to GET scheduled pod")
ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30))
err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err, "failed to delete pod")
ginkgo.By("verifying pod deletion was observed")
@@ -331,7 +332,7 @@ var _ = SIGDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(context.TODO(), options)
pods, err = podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 0)
})
@@ -364,27 +365,27 @@ var _ = SIGDescribe("Pods", func() {
})
ginkgo.By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod)
pod = podClient.CreateSync(ctx, pod)
ginkgo.By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(context.TODO(), options)
pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
ginkgo.By("updating the pod")
podClient.Update(name, func(pod *v1.Pod) {
podClient.Update(ctx, name, func(pod *v1.Pod) {
value = strconv.Itoa(time.Now().Nanosecond())
pod.Labels["time"] = value
})
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
ginkgo.By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(context.TODO(), options)
pods, err = podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
framework.Logf("Pod update OK")
@@ -418,22 +419,22 @@ var _ = SIGDescribe("Pods", func() {
})
ginkgo.By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
podClient.CreateSync(ctx, pod)
ginkgo.By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set{"time": value})
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(context.TODO(), options)
pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
ginkgo.By("updating the pod")
podClient.Update(name, func(pod *v1.Pod) {
podClient.Update(ctx, name, func(pod *v1.Pod) {
newDeadline := int64(5)
pod.Spec.ActiveDeadlineSeconds = &newDeadline
})
framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, pod.Name, "DeadlineExceeded", f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, pod.Name, "DeadlineExceeded", f.Namespace.Name))
})
/*
@@ -460,7 +461,7 @@ var _ = SIGDescribe("Pods", func() {
},
},
})
podClient.CreateSync(serverPod)
podClient.CreateSync(ctx, serverPod)
// This service exposes port 8080 of the test pod as a service on port 8765
// TODO(filbranden): We would like to use a unique service name such as:
@@ -487,7 +488,7 @@ var _ = SIGDescribe("Pods", func() {
},
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc, metav1.CreateOptions{})
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create service")
// Make a client pod that verifies that it has the service environment variables.
@@ -523,7 +524,7 @@ var _ = SIGDescribe("Pods", func() {
"FOOSERVICE_PORT_8765_TCP_ADDR=",
}
expectNoErrorWithRetries(func() error {
return e2epodoutput.MatchContainerOutput(f, pod, containerName, expectedVars, gomega.ContainSubstring)
return e2epodoutput.MatchContainerOutput(ctx, f, pod, containerName, expectedVars, gomega.ContainSubstring)
}, maxRetries, "Container should have service environment variables set")
})
@@ -555,7 +556,7 @@ var _ = SIGDescribe("Pods", func() {
})
ginkgo.By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod)
pod = podClient.CreateSync(ctx, pod)
req := f.ClientSet.CoreV1().RESTClient().Get().
Namespace(f.Namespace.Name).
@@ -576,7 +577,7 @@ var _ = SIGDescribe("Pods", func() {
defer ws.Close()
buf := &bytes.Buffer{}
gomega.Eventually(func() error {
gomega.Eventually(ctx, func() error {
for {
var msg []byte
if err := websocket.Message.Receive(ws, &msg); err != nil {
@@ -637,7 +638,7 @@ var _ = SIGDescribe("Pods", func() {
})
ginkgo.By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
podClient.CreateSync(ctx, pod)
req := f.ClientSet.CoreV1().RESTClient().Get().
Namespace(f.Namespace.Name).
@@ -692,18 +693,18 @@ var _ = SIGDescribe("Pods", func() {
},
})
delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration)
delay1, delay2 := startPodAndGetBackOffs(ctx, podClient, pod, buildBackOffDuration)
ginkgo.By("updating the image")
podClient.Update(podName, func(pod *v1.Pod) {
podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
})
time.Sleep(syncLoopFrequency)
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
ginkgo.By("get restart delay after image update")
delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName)
delayAfterUpdate, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
@@ -733,7 +734,7 @@ var _ = SIGDescribe("Pods", func() {
},
})
podClient.CreateSync(pod)
podClient.CreateSync(ctx, pod)
time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
// wait for a delay == capped delay of MaxContainerBackOff
@@ -743,7 +744,7 @@ var _ = SIGDescribe("Pods", func() {
err error
)
for i := 0; i < 3; i++ {
delay1, err = getRestartDelay(podClient, podName, containerName)
delay1, err = getRestartDelay(ctx, podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
@@ -758,7 +759,7 @@ var _ = SIGDescribe("Pods", func() {
}
ginkgo.By("getting restart delay after a capped delay")
delay2, err := getRestartDelay(podClient, podName, containerName)
delay2, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
@@ -795,7 +796,7 @@ var _ = SIGDescribe("Pods", func() {
validatePodReadiness := func(expectReady bool) {
err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
pod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{})
pod, err := podClient.Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err)
podReady := podutils.IsPodReady(pod)
res := expectReady == podReady
@@ -808,29 +809,29 @@ var _ = SIGDescribe("Pods", func() {
}
ginkgo.By("submitting the pod to kubernetes")
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
if podClient.PodIsReady(podName) {
e2epod.NewPodClient(f).Create(ctx, pod)
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
if podClient.PodIsReady(ctx, podName) {
framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name)
}
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status")
_, err := podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err)
// Sleep for 10 seconds.
time.Sleep(syncLoopFrequency)
// Verify the pod is still not ready
if podClient.PodIsReady(podName) {
if podClient.PodIsReady(ctx, podName) {
framework.Failf("Expect pod(%s/%s)'s Ready condition to be false with only one condition in readinessGates equal to True", f.Namespace.Name, pod.Name)
}
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status")
_, err = podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err)
validatePodReadiness(true)
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), metav1.PatchOptions{}, "status")
_, err = podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err)
validatePodReadiness(false)
@@ -850,7 +851,7 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("Create set of pods")
// create a set of pods in test namespace
for _, podTestName := range podTestNames {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx,
e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podTestName,
@@ -872,17 +873,17 @@ var _ = SIGDescribe("Pods", func() {
// wait as required for all 3 pods to be running
ginkgo.By("waiting for all 3 pods to be running")
err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil)
err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil)
framework.ExpectNoError(err, "3 pods not found running.")
// delete Collection of pods with a label in the current namespace
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{
LabelSelector: "type=Testing"})
framework.ExpectNoError(err, "failed to delete collection of pods")
// wait for all pods to be deleted
ginkgo.By("waiting for all pods to be deleted")
err = wait.PollImmediate(podRetryPeriod, f.Timeouts.PodDelete, checkPodListQuantity(f, "type=Testing", 0))
err = wait.PollImmediateWithContext(ctx, podRetryPeriod, f.Timeouts.PodDelete, checkPodListQuantity(f, "type=Testing", 0))
framework.ExpectNoError(err, "found a pod(s)")
})
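The hunk above also swaps wait.PollImmediate for wait.PollImmediateWithContext, whose condition has the signature func(ctx context.Context) (bool, error); checkPodListQuantity is updated to match at the end of this file. A self-contained sketch of a context-aware poll with a stand-in condition, not the framework helper:

package main

import (
    "context"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    // The e2e test gets this ctx from Ginkgo; a timeout context stands in here.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    start := time.Now()
    // The condition receives ctx, so both the poll loop and any API calls made
    // inside it stop as soon as the context is cancelled.
    err := wait.PollImmediateWithContext(ctx, 50*time.Millisecond, time.Second,
        func(ctx context.Context) (bool, error) {
            return time.Since(start) > 200*time.Millisecond, nil
        })
    fmt.Println("poll finished:", err)
}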
@@ -906,10 +907,10 @@ var _ = SIGDescribe("Pods", func() {
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = testPodLabelsFlat
return f.ClientSet.CoreV1().Pods(testNamespaceName).Watch(context.TODO(), options)
return f.ClientSet.CoreV1().Pods(testNamespaceName).Watch(ctx, options)
},
}
podsList, err := f.ClientSet.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{LabelSelector: testPodLabelsFlat})
podsList, err := f.ClientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{LabelSelector: testPodLabelsFlat})
framework.ExpectNoError(err, "failed to list Pods")
testPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
@@ -928,13 +929,13 @@ var _ = SIGDescribe("Pods", func() {
},
})
ginkgo.By("creating a Pod with a static label")
_, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(context.TODO(), testPod, metav1.CreateOptions{})
_, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(ctx, testPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName)
ginkgo.By("watching for Pod to be ready")
ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel()
_, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
_, err = watchtools.Until(ctxUntil, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if pod, ok := event.Object.(*v1.Pod); ok {
found := pod.ObjectMeta.Name == testPod.ObjectMeta.Name &&
pod.ObjectMeta.Namespace == testNamespaceName &&
@@ -953,7 +954,7 @@ var _ = SIGDescribe("Pods", func() {
if err != nil {
framework.Logf("failed to see event that pod is created: %v", err)
}
p, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{})
p, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get Pod %v in namespace %v", testPodName, testNamespaceName)
framework.ExpectEqual(p.Status.Phase, v1.PodRunning, "failed to see Pod %v in namespace %v running", p.ObjectMeta.Name, testNamespaceName)
@@ -972,11 +973,11 @@ var _ = SIGDescribe("Pods", func() {
},
})
framework.ExpectNoError(err, "failed to marshal JSON patch for Pod")
_, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Patch(context.TODO(), testPodName, types.StrategicMergePatchType, []byte(podPatch), metav1.PatchOptions{})
_, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Patch(ctx, testPodName, types.StrategicMergePatchType, []byte(podPatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch Pod %s in namespace %s", testPodName, testNamespaceName)
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
defer cancel()
_, err = watchtools.Until(ctx, prePatchResourceVersion, w, func(event watch.Event) (bool, error) {
_, err = watchtools.Until(ctxUntil, prePatchResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Modified:
if pod, ok := event.Object.(*v1.Pod); ok {
@@ -994,7 +995,7 @@ var _ = SIGDescribe("Pods", func() {
}
ginkgo.By("getting the Pod and ensuring that it's patched")
pod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch Pod %s in namespace %s", testPodName, testNamespaceName)
framework.ExpectEqual(pod.ObjectMeta.Labels["test-pod"], "patched", "failed to patch Pod - missing label")
framework.ExpectEqual(pod.Spec.Containers[0].Image, testPodImage2, "failed to patch Pod - wrong image")
@@ -1003,7 +1004,7 @@ var _ = SIGDescribe("Pods", func() {
var podStatusUpdate *v1.Pod
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
podStatusUnstructured, err := dc.Resource(podResource).Namespace(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}, "status")
podStatusUnstructured, err := dc.Resource(podResource).Namespace(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "failed to fetch PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName)
podStatusBytes, err := json.Marshal(podStatusUnstructured)
framework.ExpectNoError(err, "failed to marshal unstructured response")
@@ -1020,7 +1021,7 @@ var _ = SIGDescribe("Pods", func() {
}
}
framework.ExpectEqual(podStatusFieldPatchCount, podStatusFieldPatchCountTotal, "failed to patch all relevant Pod conditions")
podStatusUpdate, err = f.ClientSet.CoreV1().Pods(testNamespaceName).UpdateStatus(context.TODO(), &podStatusUpdated, metav1.UpdateOptions{})
podStatusUpdate, err = f.ClientSet.CoreV1().Pods(testNamespaceName).UpdateStatus(ctx, &podStatusUpdated, metav1.UpdateOptions{})
return err
})
framework.ExpectNoError(err, "failed to update PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName)
@@ -1037,13 +1038,13 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("deleting the Pod via a Collection with a LabelSelector")
preDeleteResourceVersion := podStatusUpdate.ResourceVersion
err = f.ClientSet.CoreV1().Pods(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testPodLabelsFlat})
err = f.ClientSet.CoreV1().Pods(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testPodLabelsFlat})
framework.ExpectNoError(err, "failed to delete Pod by collection")
ginkgo.By("watching for the Pod to be deleted")
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Minute)
ctxUntil, cancel = context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
_, err = watchtools.Until(ctx, preDeleteResourceVersion, w, func(event watch.Event) (bool, error) {
_, err = watchtools.Until(ctxUntil, preDeleteResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
if pod, ok := event.Object.(*v1.Pod); ok {
@@ -1061,7 +1062,7 @@ var _ = SIGDescribe("Pods", func() {
if err != nil {
framework.Logf("failed to see %v event: %v", watch.Deleted, err)
}
postDeletePod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{})
postDeletePod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
var postDeletePodJSON []byte
if postDeletePod != nil {
postDeletePodJSON, _ = json.Marshal(postDeletePod)
@@ -1102,9 +1103,9 @@ var _ = SIGDescribe("Pods", func() {
},
},
})
pod, err := podClient.Create(context.TODO(), testPod, metav1.CreateOptions{})
pod, err := podClient.Create(ctx, testPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, ns)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "Pod didn't start within time out period")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod), "Pod didn't start within time out period")
ginkgo.By("patching /status")
podStatus := v1.PodStatus{
@@ -1114,7 +1115,7 @@ var _ = SIGDescribe("Pods", func() {
pStatusJSON, err := json.Marshal(podStatus)
framework.ExpectNoError(err, "Failed to marshal. %v", podStatus)
pStatus, err := podClient.Patch(context.TODO(), podName, types.MergePatchType,
pStatus, err := podClient.Patch(ctx, podName, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(pStatusJSON)+`}`),
metav1.PatchOptions{}, "status")
framework.ExpectNoError(err, "failed to patch pod: %q", podName)
@@ -1124,11 +1125,11 @@ var _ = SIGDescribe("Pods", func() {
})
})
func checkPodListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) {
return func() (bool, error) {
func checkPodListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) {
return func(ctx context.Context) (bool, error) {
var err error
list, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
list, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: label})
if err != nil {

View File

@@ -55,14 +55,14 @@ var _ = SIGDescribe("PodTemplates", func() {
podTemplateName := "nginx-pod-template-" + string(uuid.NewUUID())
// get a list of PodTemplates (in all namespaces to hit endpoint)
podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{
podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(ctx, metav1.ListOptions{
LabelSelector: "podtemplate-static=true",
})
framework.ExpectNoError(err, "failed to list all PodTemplates")
framework.ExpectEqual(len(podTemplateList.Items), 0, "unable to find templates")
// create a PodTemplate
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(context.TODO(), &v1.PodTemplate{
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(ctx, &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: podTemplateName,
Labels: map[string]string{
@@ -80,7 +80,7 @@ var _ = SIGDescribe("PodTemplates", func() {
framework.ExpectNoError(err, "failed to create PodTemplate")
// get template
podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{})
podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(ctx, podTemplateName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get created PodTemplate")
framework.ExpectEqual(podTemplateRead.ObjectMeta.Name, podTemplateName)
@@ -93,20 +93,20 @@ var _ = SIGDescribe("PodTemplates", func() {
},
})
framework.ExpectNoError(err, "failed to marshal patch data")
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{})
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(ctx, podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch PodTemplate")
// get template (ensure label is there)
podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{})
podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(ctx, podTemplateName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get PodTemplate")
framework.ExpectEqual(podTemplateRead.ObjectMeta.Labels["podtemplate"], "patched", "failed to patch template, new label not found")
// delete the PodTemplate
err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(ctx, podTemplateName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PodTemplate")
// list the PodTemplates
podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{
podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(ctx, metav1.ListOptions{
LabelSelector: "podtemplate-static=true",
})
framework.ExpectNoError(err, "failed to list PodTemplate")
@@ -125,7 +125,7 @@ var _ = SIGDescribe("PodTemplates", func() {
ginkgo.By("Create set of pod templates")
// create a set of pod templates in test namespace
for _, podTemplateName := range podTemplateNames {
_, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).Create(context.TODO(), &v1.PodTemplate{
_, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).Create(ctx, &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: podTemplateName,
Labels: map[string]string{"podtemplate-set": "true"},
@@ -144,7 +144,7 @@ var _ = SIGDescribe("PodTemplates", func() {
ginkgo.By("get a list of pod templates with a label in the current namespace")
// get a list of pod templates
podTemplateList, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
podTemplateList, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: "podtemplate-set=true",
})
framework.ExpectNoError(err, "failed to get a list of pod templates")
@@ -155,13 +155,13 @@ var _ = SIGDescribe("PodTemplates", func() {
// delete collection
framework.Logf("requesting DeleteCollection of pod templates")
err = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
err = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: "podtemplate-set=true"})
framework.ExpectNoError(err, "failed to delete all pod templates")
ginkgo.By("check that the list of pod templates matches the requested quantity")
err = wait.PollImmediate(podTemplateRetryPeriod, podTemplateRetryTimeout, checkPodTemplateListQuantity(f, "podtemplate-set=true", 0))
err = wait.PollImmediate(podTemplateRetryPeriod, podTemplateRetryTimeout, checkPodTemplateListQuantity(ctx, f, "podtemplate-set=true", 0))
framework.ExpectNoError(err, "failed to count required pod templates")
})
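Note the contrast with the Pods file earlier: here the helper keeps the plain wait.PollImmediate loop and simply receives ctx as an ordinary argument, capturing it in the returned closure. A small sketch of that capture variant with a generic condition, not the framework helper:

package main

import (
    "context"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// listIsEmpty returns a plain wait.ConditionFunc that closes over ctx, so the
// work inside still honours cancellation even though wait.PollImmediate itself
// is context-unaware.
func listIsEmpty(ctx context.Context) func() (bool, error) {
    return func() (bool, error) {
        if err := ctx.Err(); err != nil {
            return false, err // stop polling once the spec is aborted
        }
        return true, nil // pretend the list is already empty
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    err := wait.PollImmediate(20*time.Millisecond, 500*time.Millisecond, listIsEmpty(ctx))
    fmt.Println("poll finished:", err)
}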
@@ -178,7 +178,7 @@ var _ = SIGDescribe("PodTemplates", func() {
ptName := "podtemplate-" + utilrand.String(5)
ginkgo.By("Create a pod template")
ptResource, err := ptClient.Create(context.TODO(), &v1.PodTemplate{
ptResource, err := ptClient.Create(ctx, &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: ptName,
},
@@ -196,12 +196,12 @@ var _ = SIGDescribe("PodTemplates", func() {
var updatedPT *v1.PodTemplate
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
ptResource, err = ptClient.Get(context.TODO(), ptName, metav1.GetOptions{})
ptResource, err = ptClient.Get(ctx, ptName, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get pod template %s", ptName)
ptResource.Annotations = map[string]string{
"updated": "true",
}
updatedPT, err = ptClient.Update(context.TODO(), ptResource, metav1.UpdateOptions{})
updatedPT, err = ptClient.Update(ctx, ptResource, metav1.UpdateOptions{})
return err
})
framework.ExpectNoError(err)
@@ -211,13 +211,13 @@ var _ = SIGDescribe("PodTemplates", func() {
})
func checkPodTemplateListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) {
func checkPodTemplateListQuantity(ctx context.Context, f *framework.Framework, label string, quantity int) func() (bool, error) {
return func() (bool, error) {
var err error
framework.Logf("requesting list of pod templates to confirm quantity")
list, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
list, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: label})
if err != nil {

View File

@@ -54,7 +54,7 @@ var _ = SIGDescribe("PrivilegedPod [NodeConformance]", func() {
ginkgo.It("should enable privileged commands [LinuxOnly]", func(ctx context.Context) {
// Windows does not support privileged containers.
ginkgo.By("Creating a pod with a privileged container")
config.createPods()
config.createPods(ctx)
ginkgo.By("Executing in the privileged container")
config.run(config.privilegedContainer, true)
@@ -115,7 +115,7 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
}
}
func (c *PrivilegedPodTestConfig) createPods() {
func (c *PrivilegedPodTestConfig) createPods(ctx context.Context) {
podSpec := c.createPodsSpec()
c.pod = e2epod.NewPodClient(c.f).CreateSync(podSpec)
c.pod = e2epod.NewPodClient(c.f).CreateSync(ctx, podSpec)
}

View File

@@ -104,32 +104,32 @@ while true; do sleep 1; done
RestartPolicy: testCase.RestartPolicy,
Volumes: testVolumes,
}
terminateContainer.Create()
terminateContainer.Create(ctx)
ginkgo.DeferCleanup(framework.IgnoreNotFound(terminateContainer.Delete))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
gomega.Eventually(func() (int32, error) {
status, err := terminateContainer.GetStatus()
gomega.Eventually(ctx, func() (int32, error) {
status, err := terminateContainer.GetStatus(ctx)
return status.RestartCount, err
}, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.RestartCount))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name))
gomega.Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase))
gomega.Eventually(ctx, terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
isReady, err := terminateContainer.IsReady()
isReady, err := terminateContainer.IsReady(ctx)
framework.ExpectEqual(isReady, testCase.Ready)
framework.ExpectNoError(err)
status, err := terminateContainer.GetStatus()
status, err := terminateContainer.GetStatus(ctx)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
framework.ExpectEqual(GetContainerState(status.State), testCase.State)
ginkgo.By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name))
gomega.Expect(terminateContainer.Delete()).To(gomega.Succeed())
gomega.Eventually(terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.BeFalse())
gomega.Expect(terminateContainer.Delete(ctx)).To(gomega.Succeed())
gomega.Eventually(ctx, terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.BeFalse())
}
})
})
@@ -141,7 +141,7 @@ while true; do sleep 1; done
nonAdminUserName := "ContainerUser"
// Create and then terminate the container under defined PodPhase to verify if termination message matches the expected output. Lastly delete the created container.
matchTerminationMessage := func(container v1.Container, expectedPhase v1.PodPhase, expectedMsg gomegatypes.GomegaMatcher) {
matchTerminationMessage := func(ctx context.Context, container v1.Container, expectedPhase v1.PodPhase, expectedMsg gomegatypes.GomegaMatcher) {
container.Name = "termination-message-container"
c := ConformanceContainer{
PodClient: e2epod.NewPodClient(f),
@@ -150,14 +150,14 @@ while true; do sleep 1; done
}
ginkgo.By("create the container")
c.Create()
c.Create(ctx)
ginkgo.DeferCleanup(framework.IgnoreNotFound(c.Delete))
ginkgo.By(fmt.Sprintf("wait for the container to reach %s", expectedPhase))
gomega.Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase))
gomega.Eventually(ctx, c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase))
ginkgo.By("get the container status")
status, err := c.GetStatus()
status, err := c.GetStatus(ctx)
framework.ExpectNoError(err)
ginkgo.By("the container should be terminated")
@@ -168,7 +168,7 @@ while true; do sleep 1; done
gomega.Expect(status.State.Terminated.Message).Should(expectedMsg)
ginkgo.By("delete the container")
gomega.Expect(c.Delete()).To(gomega.Succeed())
gomega.Expect(c.Delete(ctx)).To(gomega.Succeed())
}
ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func(ctx context.Context) {
@@ -184,7 +184,7 @@ while true; do sleep 1; done
} else {
container.SecurityContext.RunAsUser = &rootUser
}
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("DONE"))
matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("DONE"))
})
/*
@@ -205,7 +205,7 @@ while true; do sleep 1; done
} else {
container.SecurityContext.RunAsUser = &nonRootUser
}
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("DONE"))
matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("DONE"))
})
/*
@@ -221,7 +221,7 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
}
matchTerminationMessage(container, v1.PodFailed, gomega.Equal("DONE"))
matchTerminationMessage(ctx, container, v1.PodFailed, gomega.Equal("DONE"))
})
/*
@@ -237,7 +237,7 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
}
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal(""))
matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal(""))
})
/*
@@ -253,7 +253,7 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
}
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("OK"))
matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("OK"))
})
})
@@ -262,7 +262,7 @@ while true; do sleep 1; done
// Images used for ConformanceContainer are not added into NodePrePullImageList, because this test is
// testing image pulling, these images don't need to be prepulled. The ImagePullPolicy
// is v1.PullAlways, so it won't be blocked by framework image pre-pull list check.
imagePullTest := func(image string, hasSecret bool, expectedPhase v1.PodPhase, expectedPullStatus bool, windowsImage bool) {
imagePullTest := func(ctx context.Context, image string, hasSecret bool, expectedPhase v1.PodPhase, expectedPullStatus bool, windowsImage bool) {
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
if windowsImage {
// -t: Ping the specified host until stopped.
@@ -301,14 +301,14 @@ while true; do sleep 1; done
}
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
ginkgo.By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.DeferCleanup(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete, secret.Name, metav1.DeleteOptions{})
container.ImagePullSecrets = []string{secret.Name}
}
// checkContainerStatus checks whether the container status matches expectation.
checkContainerStatus := func() error {
status, err := container.GetStatus()
checkContainerStatus := func(ctx context.Context) error {
status, err := container.GetStatus(ctx)
if err != nil {
return fmt.Errorf("failed to get container status: %v", err)
}
@@ -333,7 +333,7 @@ while true; do sleep 1; done
}
}
// Check pod phase
phase, err := container.GetPhase()
phase, err := container.GetPhase(ctx)
if err != nil {
return fmt.Errorf("failed to get pod phase: %v", err)
}
@@ -348,15 +348,15 @@ while true; do sleep 1; done
for i := 1; i <= flakeRetry; i++ {
var err error
ginkgo.By("create the container")
container.Create()
container.Create(ctx)
ginkgo.By("check the container status")
for start := time.Now(); time.Since(start) < ContainerStatusRetryTimeout; time.Sleep(ContainerStatusPollInterval) {
if err = checkContainerStatus(); err == nil {
if err = checkContainerStatus(ctx); err == nil {
break
}
}
ginkgo.By("delete the container")
container.Delete()
_ = container.Delete(ctx)
if err == nil {
break
}
@@ -370,18 +370,18 @@ while true; do sleep 1; done
ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func(ctx context.Context) {
image := imageutils.GetE2EImage(imageutils.InvalidRegistryImage)
imagePullTest(image, false, v1.PodPending, true, false)
imagePullTest(ctx, image, false, v1.PodPending, true, false)
})
ginkgo.It("should be able to pull image [NodeConformance]", func(ctx context.Context) {
// NOTE(claudiub): The agnhost image is supposed to work on both Linux and Windows.
image := imageutils.GetE2EImage(imageutils.Agnhost)
imagePullTest(image, false, v1.PodRunning, false, false)
imagePullTest(ctx, image, false, v1.PodRunning, false, false)
})
ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func(ctx context.Context) {
image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine)
imagePullTest(image, false, v1.PodPending, true, false)
imagePullTest(ctx, image, false, v1.PodPending, true, false)
})
ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func(ctx context.Context) {
@@ -391,7 +391,7 @@ while true; do sleep 1; done
image = imageutils.GetE2EImage(imageutils.AuthenticatedWindowsNanoServer)
isWindows = true
}
imagePullTest(image, true, v1.PodRunning, false, isWindows)
imagePullTest(ctx, image, true, v1.PodRunning, false, isWindows)
})
})
})
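These hunks also lean on ginkgo.DeferCleanup: the cleanup is registered as a function value plus its remaining arguments, and Ginkgo supplies the leading context when the cleanup actually runs, so teardown honours cancellation as well. A minimal sketch under that assumption; createThing and deleteThing are hypothetical stand-ins for the framework's CreateSync and Delete:

package example_test

import (
    "context"
    "testing"

    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
)

func TestCleanupWiring(t *testing.T) {
    gomega.RegisterFailHandler(ginkgo.Fail)
    ginkgo.RunSpecs(t, "cleanup wiring")
}

// createThing and deleteThing are hypothetical stand-ins for CreateSync / Delete.
func createThing(ctx context.Context, name string) error { return ctx.Err() }
func deleteThing(ctx context.Context, name string) error { return ctx.Err() }

var _ = ginkgo.It("registers context-aware cleanup", func(ctx context.Context) {
    gomega.Expect(createThing(ctx, "thing-a")).To(gomega.Succeed())

    // DeferCleanup takes the cleanup function plus its remaining arguments;
    // the leading context is supplied by Ginkgo when the cleanup runs.
    ginkgo.DeferCleanup(deleteThing, "thing-a")
})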

View File

@@ -54,15 +54,15 @@ var _ = SIGDescribe("RuntimeClass", func() {
*/
framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func(ctx context.Context) {
rcName := f.Namespace.Name + "-nonexistent"
expectPodRejection(f, e2enode.NewRuntimeClassPod(rcName))
expectPodRejection(ctx, f, e2enode.NewRuntimeClassPod(rcName))
})
// The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler not being installed.
ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) {
handler := f.Namespace.Name + "-handler"
rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil)
rcName := createRuntimeClass(ctx, f, "unconfigured-handler", handler, nil)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName))
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
@@ -70,12 +70,12 @@ var _ = SIGDescribe("RuntimeClass", func() {
"reason": events.FailedCreatePodSandBox,
}.AsSelector().String()
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
err := e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, handler, framework.PodEventTimeout)
err := e2eevents.WaitTimeoutForEvent(ctx, f.ClientSet, f.Namespace.Name, eventSelector, handler, framework.PodEventTimeout)
if err != nil {
framework.Logf("Warning: did not get event about FailedCreatePodSandBox. Err: %v", err)
}
// Check the pod is still not running
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
})
@@ -87,10 +87,10 @@ var _ = SIGDescribe("RuntimeClass", func() {
// see https://github.com/kubernetes/kubernetes/blob/eb729620c522753bc7ae61fc2c7b7ea19d4aad2f/cluster/gce/gci/configure-helper.sh#L3069-L3076
e2eskipper.SkipUnlessProviderIs("gce")
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
expectPodSuccess(f, pod)
pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName))
expectPodSuccess(ctx, f, pod)
})
/*
@@ -102,12 +102,12 @@ var _ = SIGDescribe("RuntimeClass", func() {
is not being tested here.
*/
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, label)
framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass")
framework.ExpectEqual(len(pods.Items), 1)
@@ -127,17 +127,17 @@ var _ = SIGDescribe("RuntimeClass", func() {
is not being tested here.
*/
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{
rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{
PodFixed: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("1Mi"),
},
})
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, label)
framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass")
framework.ExpectEqual(len(pods.Items), 1)
@@ -154,16 +154,16 @@ var _ = SIGDescribe("RuntimeClass", func() {
Description: Pod requesting the deleted RuntimeClass must be rejected.
*/
framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "delete-me", "runc", nil)
rcName := createRuntimeClass(ctx, f, "delete-me", "runc", nil)
rcClient := f.ClientSet.NodeV1().RuntimeClasses()
ginkgo.By("Deleting RuntimeClass "+rcName, func() {
err := rcClient.Delete(context.TODO(), rcName, metav1.DeleteOptions{})
err := rcClient.Delete(ctx, rcName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName)
ginkgo.By("Waiting for the RuntimeClass to disappear")
framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) {
_, err := rcClient.Get(context.TODO(), rcName, metav1.GetOptions{})
_, err := rcClient.Get(ctx, rcName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil // done
}
@@ -174,7 +174,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
}))
})
expectPodRejection(f, e2enode.NewRuntimeClassPod(rcName))
expectPodRejection(ctx, f, e2enode.NewRuntimeClassPod(rcName))
})
/*
@@ -227,7 +227,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
ginkgo.By("getting /apis/node.k8s.io")
{
group := &metav1.APIGroup{}
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/node.k8s.io").Do(context.TODO()).Into(group)
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/node.k8s.io").Do(ctx).Into(group)
framework.ExpectNoError(err)
found := false
for _, version := range group.Versions {
@@ -260,43 +260,43 @@ var _ = SIGDescribe("RuntimeClass", func() {
// Main resource create/read/update/watch operations
ginkgo.By("creating")
createdRC, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
createdRC, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
framework.ExpectNoError(err)
_, err = rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
_, err = rcClient.Create(ctx, rc, metav1.CreateOptions{})
if !apierrors.IsAlreadyExists(err) {
framework.Failf("expected 409, got %#v", err)
}
_, err = rcClient.Create(context.TODO(), rc2, metav1.CreateOptions{})
_, err = rcClient.Create(ctx, rc2, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("watching")
framework.Logf("starting watch")
rcWatch, err := rcClient.Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
rcWatch, err := rcClient.Watch(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err)
// added for a watch
_, err = rcClient.Create(context.TODO(), rc3, metav1.CreateOptions{})
_, err = rcClient.Create(ctx, rc3, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("getting")
gottenRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
gottenRC, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(gottenRC.UID, createdRC.UID)
ginkgo.By("listing")
rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
rcs, err := rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err)
framework.ExpectEqual(len(rcs.Items), 3, "filtered list should have 3 items")
ginkgo.By("patching")
patchedRC, err := rcClient.Patch(context.TODO(), createdRC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
patchedRC, err := rcClient.Patch(ctx, createdRC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(patchedRC.Annotations["patched"], "true", "patched object should have the applied annotation")
ginkgo.By("updating")
csrToUpdate := patchedRC.DeepCopy()
csrToUpdate.Annotations["updated"] = "true"
updatedRC, err := rcClient.Update(context.TODO(), csrToUpdate, metav1.UpdateOptions{})
updatedRC, err := rcClient.Update(ctx, csrToUpdate, metav1.UpdateOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(updatedRC.Annotations["updated"], "true", "updated object should have the applied annotation")
@@ -338,43 +338,43 @@ var _ = SIGDescribe("RuntimeClass", func() {
// main resource delete operations
ginkgo.By("deleting")
err = rcClient.Delete(context.TODO(), createdRC.Name, metav1.DeleteOptions{})
err = rcClient.Delete(ctx, createdRC.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
_, err = rcClient.Get(context.TODO(), createdRC.Name, metav1.GetOptions{})
_, err = rcClient.Get(ctx, createdRC.Name, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
framework.Failf("expected 404, got %#v", err)
}
rcs, err = rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
rcs, err = rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err)
framework.ExpectEqual(len(rcs.Items), 2, "filtered list should have 2 items")
ginkgo.By("deleting a collection")
err = rcClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
err = rcClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err)
rcs, err = rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
rcs, err = rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err)
framework.ExpectEqual(len(rcs.Items), 0, "filtered list should have 0 items")
})
})
func deleteRuntimeClass(f *framework.Framework, name string) {
err := f.ClientSet.NodeV1().RuntimeClasses().Delete(context.TODO(), name, metav1.DeleteOptions{})
func deleteRuntimeClass(ctx context.Context, f *framework.Framework, name string) {
err := f.ClientSet.NodeV1().RuntimeClasses().Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete RuntimeClass resource")
}
// createRuntimeClass generates a RuntimeClass with the desired handler and a "namespaced" name,
// synchronously creates it, and returns the generated name.
func createRuntimeClass(f *framework.Framework, name, handler string, overhead *nodev1.Overhead) string {
func createRuntimeClass(ctx context.Context, f *framework.Framework, name, handler string, overhead *nodev1.Overhead) string {
uniqueName := fmt.Sprintf("%s-%s", f.Namespace.Name, name)
rc := runtimeclasstest.NewRuntimeClass(uniqueName, handler)
rc.Overhead = overhead
rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), rc, metav1.CreateOptions{})
rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create RuntimeClass resource")
return rc.GetName()
}
func expectPodRejection(f *framework.Framework, pod *v1.Pod) {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
func expectPodRejection(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectError(err, "should be forbidden")
if !apierrors.IsForbidden(err) {
framework.Failf("expected forbidden error, got %#v", err)
@@ -382,7 +382,7 @@ func expectPodRejection(f *framework.Framework, pod *v1.Pod) {
}
// expectPodSuccess waits for the given pod to terminate successfully.
func expectPodSuccess(f *framework.Framework, pod *v1.Pod) {
func expectPodSuccess(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(
f.ClientSet, pod.Name, f.Namespace.Name))
ctx, f.ClientSet, pod.Name, f.Namespace.Name))
}
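Two details in the RuntimeClass changes above are worth calling out. First, deleteRuntimeClass now takes ctx as its first parameter, yet the ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) call sites still pass only f and rcName: Ginkgo v2's DeferCleanup can detect a leading context parameter and supply a context of its own when the cleanup runs, so the remaining arguments are passed as before (that is my reading of the Ginkgo API, not something stated in this diff). A small sketch of the shape, with hypothetical names:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// deleteThing mirrors deleteRuntimeClass: a cleanup helper whose first
// parameter is the context it should use for API calls.
func deleteThing(ctx context.Context, name string) {
	_ = ctx  // stands in for client.Delete(ctx, name, ...)
	_ = name
}

var _ = ginkgo.It("cleans up with an injected context", func(ctx context.Context) {
	// Only the non-context arguments are given here; DeferCleanup is
	// expected to provide a context for the leading parameter itself.
	ginkgo.DeferCleanup(deleteThing, "thing-to-delete")
})

Second, the wait.PollImmediate loop in the deleted-RuntimeClass spec now passes ctx to the Get call inside the condition, but the loop itself still runs on its own one-minute timer. A context-aware variant such as wait.PollImmediateWithContext (assuming that helper is available in this tree) would let the loop itself return as soon as ctx is cancelled.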

View File

@@ -49,7 +49,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -82,7 +82,7 @@ var _ = SIGDescribe("Secrets", func() {
},
}
e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "consume secrets", pod, 0, []string{
"SECRET_DATA=value-1",
})
})
@@ -97,7 +97,7 @@ var _ = SIGDescribe("Secrets", func() {
secret := secretForTest(f.Namespace.Name, name)
ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -126,7 +126,7 @@ var _ = SIGDescribe("Secrets", func() {
},
}
e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "consume secrets", pod, 0, []string{
"data-1=value-1", "data-2=value-2", "data-3=value-3",
"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
})
@@ -138,7 +138,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Attempt to create a Secret with an empty key. The creation MUST fail.
*/
framework.ConformanceIt("should fail to create secret due to empty secret key", func(ctx context.Context) {
secret, err := createEmptyKeySecretForTest(f)
secret, err := createEmptyKeySecretForTest(ctx, f)
framework.ExpectError(err, "created secret %q with empty key in namespace %q", secret.Name, f.Namespace.Name)
})
@@ -157,7 +157,7 @@ var _ = SIGDescribe("Secrets", func() {
secretTestName := "test-secret-" + string(uuid.NewUUID())
// create a secret in the test namespace
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), &v1.Secret{
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretTestName,
Labels: map[string]string{
@@ -173,7 +173,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.By("listing secrets in all namespaces to ensure that there are more than zero")
// list all secrets in all namespaces to ensure endpoint coverage
secretsList, err := f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{
secretsList, err := f.ClientSet.CoreV1().Secrets("").List(ctx, metav1.ListOptions{
LabelSelector: "testsecret-constant=true",
})
framework.ExpectNoError(err, "failed to list secrets")
@@ -202,10 +202,10 @@ var _ = SIGDescribe("Secrets", func() {
"data": map[string][]byte{"key": []byte(secretPatchNewData)},
})
framework.ExpectNoError(err, "failed to marshal JSON")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(context.TODO(), secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch), metav1.PatchOptions{})
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(ctx, secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch secret")
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretCreatedName, metav1.GetOptions{})
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, secretCreatedName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get secret")
secretDecodedstring, err := base64.StdEncoding.DecodeString(string(secret.Data["key"]))
@@ -214,14 +214,14 @@ var _ = SIGDescribe("Secrets", func() {
framework.ExpectEqual(string(secretDecodedstring), "value1", "found secret, but the data wasn't updated from the patch")
ginkgo.By("deleting the secret using a LabelSelector")
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: "testsecret=true",
})
framework.ExpectNoError(err, "failed to delete patched secret")
ginkgo.By("listing secrets in all namespaces, searching for label name and value in patch")
// list all secrets in all namespaces
secretsList, err = f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{
secretsList, err = f.ClientSet.CoreV1().Secrets("").List(ctx, metav1.ListOptions{
LabelSelector: "testsecret-constant=true",
})
framework.ExpectNoError(err, "failed to list secrets")
@@ -253,7 +253,7 @@ func secretForTest(namespace, name string) *v1.Secret {
}
}
func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) {
func createEmptyKeySecretForTest(ctx context.Context, f *framework.Framework) (*v1.Secret, error) {
secretName := "secret-emptykey-test-" + string(uuid.NewUUID())
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -265,5 +265,5 @@ func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) {
},
}
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
}

View File

@@ -76,23 +76,23 @@ var _ = SIGDescribe("Security Context", func() {
// with hostUsers=false the pod must use a new user namespace
podClient := e2epod.PodClientNS(f, f.Namespace.Name)
createdPod1 := podClient.Create(makePod(false))
createdPod2 := podClient.Create(makePod(false))
createdPod1 := podClient.Create(ctx, makePod(false))
createdPod2 := podClient.Create(ctx, makePod(false))
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("delete the pods")
podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(ctx, createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(ctx, createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
})
getLogs := func(pod *v1.Pod) (string, error) {
err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
if err != nil {
return "", err
}
podStatus, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
podStatus, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podStatus.Name, containerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podStatus.Name, containerName)
}
logs1, err := getLogs(createdPod1)
@@ -116,7 +116,7 @@ var _ = SIGDescribe("Security Context", func() {
// When running in the host's user namespace, the /proc/self/uid_map file content looks like:
// 0 0 4294967295
// Verify the value 4294967295 is present in the output.
e2epodoutput.TestContainerOutput(f, "read namespace", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "read namespace", pod, 0, []string{
"4294967295",
})
})
@@ -129,14 +129,14 @@ var _ = SIGDescribe("Security Context", func() {
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
// Create secret.
secret := secretForTest(f.Namespace.Name, name)
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -240,7 +240,7 @@ var _ = SIGDescribe("Security Context", func() {
// Each line should be "=0" that means root inside the container is the owner of the file.
downwardAPIVolFiles := 1
projectedFiles := len(secret.Data) + downwardAPIVolFiles
e2epodoutput.TestContainerOutput(f, "check file permissions", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "check file permissions", pod, 0, []string{
strings.Repeat("=0\n", len(secret.Data)+len(configMap.Data)+downwardAPIVolFiles+projectedFiles),
})
})
@@ -251,7 +251,7 @@ var _ = SIGDescribe("Security Context", func() {
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -300,7 +300,7 @@ var _ = SIGDescribe("Security Context", func() {
// Expect one line for each file on all the volumes.
// Each line should be "=200" (fsGroup) that means it was mapped to the
// right user inside the container.
e2epodoutput.TestContainerOutput(f, "check FSGroup is mapped correctly", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "check FSGroup is mapped correctly", pod, 0, []string{
strings.Repeat(fmt.Sprintf("=%v\n", fsGroup), len(configMap.Data)),
})
})
@@ -327,15 +327,15 @@ var _ = SIGDescribe("Security Context", func() {
},
}
}
createAndWaitUserPod := func(userid int64) {
createAndWaitUserPod := func(ctx context.Context, userid int64) {
podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
podClient.Create(ctx, makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
userid,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
}
/*
@@ -345,7 +345,7 @@ var _ = SIGDescribe("Security Context", func() {
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
*/
framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(65534)
createAndWaitUserPod(ctx, 65534)
})
/*
@@ -356,7 +356,7 @@ var _ = SIGDescribe("Security Context", func() {
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
*/
ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(0)
createAndWaitUserPod(ctx, 0)
})
})
@@ -390,19 +390,19 @@ var _ = SIGDescribe("Security Context", func() {
e2eskipper.SkipIfNodeOSDistroIs("windows")
name := "explicit-nonroot-uid"
pod := makeNonRootPod(name, rootImage, pointer.Int64Ptr(nonRootTestUserID))
podClient.Create(pod)
podClient.Create(ctx, pod)
podClient.WaitForSuccess(name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1000"))
podClient.WaitForSuccess(ctx, name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(ctx, name, name, "1000"))
})
ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func(ctx context.Context) {
// creates a pod with RunAsUser, which is not supported on Windows.
e2eskipper.SkipIfNodeOSDistroIs("windows")
name := "explicit-root-uid"
pod := makeNonRootPod(name, nonRootImage, pointer.Int64Ptr(0))
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
ev, err := podClient.WaitForErrorEventOrSuccess(pod)
ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod)
framework.ExpectNoError(err)
gomega.Expect(ev).NotTo(gomega.BeNil())
framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer)
@@ -410,17 +410,17 @@ var _ = SIGDescribe("Security Context", func() {
ginkgo.It("should run with an image specified user ID", func(ctx context.Context) {
name := "implicit-nonroot-uid"
pod := makeNonRootPod(name, nonRootImage, nil)
podClient.Create(pod)
podClient.Create(ctx, pod)
podClient.WaitForSuccess(name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234"))
podClient.WaitForSuccess(ctx, name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(ctx, name, name, "1234"))
})
ginkgo.It("should not run without a specified user ID", func(ctx context.Context) {
name := "implicit-root-uid"
pod := makeNonRootPod(name, rootImage, nil)
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
ev, err := podClient.WaitForErrorEventOrSuccess(pod)
ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod)
framework.ExpectNoError(err)
gomega.Expect(ev).NotTo(gomega.BeNil())
framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer)
@@ -448,18 +448,18 @@ var _ = SIGDescribe("Security Context", func() {
},
}
}
createAndWaitUserPod := func(readOnlyRootFilesystem bool) string {
createAndWaitUserPod := func(ctx context.Context, readOnlyRootFilesystem bool) string {
podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
podClient.Create(ctx, makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", "touch checkfile"},
readOnlyRootFilesystem,
))
if readOnlyRootFilesystem {
waitForFailure(f, podName, framework.PodStartTimeout)
waitForFailure(ctx, f, podName, framework.PodStartTimeout)
} else {
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
}
return podName
@@ -474,7 +474,7 @@ var _ = SIGDescribe("Security Context", func() {
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support creating containers with read-only access.
*/
ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(true)
createAndWaitUserPod(ctx, true)
})
/*
@@ -484,7 +484,7 @@ var _ = SIGDescribe("Security Context", func() {
Write operation MUST be allowed and Pod MUST be in Succeeded state.
*/
framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(false)
createAndWaitUserPod(ctx, false)
})
})
@@ -509,14 +509,14 @@ var _ = SIGDescribe("Security Context", func() {
},
}
}
createAndWaitUserPod := func(privileged bool) string {
createAndWaitUserPod := func(ctx context.Context, privileged bool) string {
podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
podClient.Create(ctx, makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
privileged,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
return podName
}
/*
@@ -526,8 +526,8 @@ var _ = SIGDescribe("Security Context", func() {
[LinuxOnly]: This test is marked as LinuxOnly since it runs a Linux-specific command.
*/
framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := createAndWaitUserPod(false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
podName := createAndWaitUserPod(ctx, false)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
}
@@ -539,8 +539,8 @@ var _ = SIGDescribe("Security Context", func() {
})
ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func(ctx context.Context) {
podName := createAndWaitUserPod(true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
podName := createAndWaitUserPod(ctx, true)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
}
@@ -573,13 +573,13 @@ var _ = SIGDescribe("Security Context", func() {
},
}
}
createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
podClient.Create(makeAllowPrivilegeEscalationPod(podName,
createAndMatchOutput := func(ctx context.Context, podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
podClient.Create(ctx, makeAllowPrivilegeEscalationPod(podName,
allowPrivilegeEscalation,
uid,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
return podClient.MatchContainerOutput(podName, podName, output)
podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
return podClient.MatchContainerOutput(ctx, podName, podName, output)
}
/*
@@ -593,7 +593,7 @@ var _ = SIGDescribe("Security Context", func() {
*/
ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil {
if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
@@ -609,7 +609,7 @@ var _ = SIGDescribe("Security Context", func() {
framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false
if err := createAndMatchOutput(podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil {
if err := createAndMatchOutput(ctx, podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
@@ -626,7 +626,7 @@ var _ = SIGDescribe("Security Context", func() {
ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil {
if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
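The Security Context helpers above (createAndWaitUserPod, createAndMatchOutput) follow the same rule: they are closures declared once at the container level and shared by several specs, so they cannot capture any single spec's context and instead receive it as an explicit first argument from each It. A compact, self-contained sketch of that shape, with hypothetical names:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("context plumbing", func() {
	// Declared once for the whole container and shared by every spec
	// below, so it takes ctx as a parameter instead of capturing it.
	check := func(ctx context.Context, name string) error {
		_ = name
		return ctx.Err() // stands in for an API call made with ctx
	}

	ginkgo.It("passes its own context", func(ctx context.Context) {
		gomega.Expect(check(ctx, "pod-a")).To(gomega.Succeed())
	})
})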
@@ -634,8 +634,8 @@ var _ = SIGDescribe("Security Context", func() {
})
// waitForFailure waits for pod to fail.
func waitForFailure(f *framework.Framework, name string, timeout time.Duration) {
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func waitForFailure(ctx context.Context, f *framework.Framework, name string, timeout time.Duration) {
gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:

View File

@@ -87,27 +87,27 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
ginkgo.By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, pod)
framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil())
ginkgo.By("Waiting for pod completion")
err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Checking that the pod succeeded")
framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded)
ginkgo.By("Getting logs from the pod")
log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
framework.ExpectNoError(err)
ginkgo.By("Checking that the sysctl is actually updated")
@@ -146,7 +146,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
ginkgo.By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := client.Create(context.TODO(), pod, metav1.CreateOptions{})
_, err := client.Create(ctx, pod, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.BeNil())
gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`))
@@ -168,11 +168,11 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
}
ginkgo.By("Creating a pod with an ignorelisted, but not allowlisted sysctl on the node")
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
ginkgo.By("Wait for pod failed reason")
// watch for pod failed reason instead of termination of pod
err := e2epod.WaitForPodFailedReason(f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart)
err := e2epod.WaitForPodFailedReason(ctx, f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart)
framework.ExpectNoError(err)
})
@@ -195,27 +195,27 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel/shm_rmid_forced"}
ginkgo.By("Creating a pod with the kernel/shm_rmid_forced sysctl")
pod = podClient.Create(pod)
pod = podClient.Create(ctx, pod)
ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, pod)
framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil())
ginkgo.By("Waiting for pod completion")
err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Checking that the pod succeeded")
framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded)
ginkgo.By("Getting logs from the pod")
log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
framework.ExpectNoError(err)
ginkgo.By("Checking that the sysctl is actually updated")

View File

@@ -45,7 +45,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithoutMappings(f, false, 0, nil)
doConfigMapE2EWithoutMappings(ctx, f, false, 0, nil)
})
/*
@@ -56,14 +56,14 @@ var _ = SIGDescribe("ConfigMap", func() {
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400)
doConfigMapE2EWithoutMappings(f, false, 0, &defaultMode)
doConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode)
})
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
e2eskipper.SkipIfNodeOSDistroIs("windows")
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doConfigMapE2EWithoutMappings(f, true, 1001, &defaultMode)
doConfigMapE2EWithoutMappings(ctx, f, true, 1001, &defaultMode)
})
/*
@@ -72,13 +72,13 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithoutMappings(f, true, 0, nil)
doConfigMapE2EWithoutMappings(ctx, f, true, 0, nil)
})
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
doConfigMapE2EWithoutMappings(f, true, 1001, nil)
doConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil)
})
/*
@@ -87,7 +87,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithMappings(f, false, 0, nil)
doConfigMapE2EWithMappings(ctx, f, false, 0, nil)
})
/*
@@ -98,7 +98,7 @@ var _ = SIGDescribe("ConfigMap", func() {
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400)
doConfigMapE2EWithMappings(f, false, 0, &mode)
doConfigMapE2EWithMappings(ctx, f, false, 0, &mode)
})
/*
@@ -107,13 +107,13 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithMappings(f, true, 0, nil)
doConfigMapE2EWithMappings(ctx, f, true, 0, nil)
})
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
doConfigMapE2EWithMappings(f, true, 1001, nil)
doConfigMapE2EWithMappings(ctx, f, true, 1001, nil)
})
/*
@@ -122,7 +122,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "configmap-test-upd-" + string(uuid.NewUUID())
@@ -141,7 +141,7 @@ var _ = SIGDescribe("ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -149,22 +149,22 @@ var _ = SIGDescribe("ConfigMap", func() {
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1")
ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
}
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
})
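In the ConfigMap hunk above, gomega.Eventually now receives ctx ahead of the poll function, and the log-fetching closure itself forwards ctx to GetPodLogs. Recent Gomega releases accept a context as Eventually's first argument and stop polling once that context is cancelled; that is my understanding of the Gomega API rather than something the diff states. A small self-contained sketch with hypothetical names:

package example

import (
	"context"
	"time"

	"github.com/onsi/gomega"
)

// waitForSubstring polls `poll` until its output contains want, the
// timeout elapses, or ctx is cancelled, whichever happens first.
func waitForSubstring(ctx context.Context, g gomega.Gomega, poll func() (string, error), want string) {
	g.Eventually(ctx, poll).
		WithTimeout(30 * time.Second).
		WithPolling(time.Second).
		Should(gomega.ContainSubstring(want))
}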
/*
@@ -173,7 +173,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod.
*/
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "configmap-test-upd-" + string(uuid.NewUUID())
@@ -196,7 +196,7 @@ var _ = SIGDescribe("ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -216,20 +216,20 @@ var _ = SIGDescribe("ConfigMap", func() {
})
ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
e2epod.NewPodClient(f).Create(ctx, pod)
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
pollLogs1 := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
}
pollLogs2 := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[1].Name)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[1].Name)
}
ginkgo.By("Waiting for pod with text data")
gomega.Eventually(pollLogs1, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollLogs1, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By("Waiting for pod with binary data")
gomega.Eventually(pollLogs2, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("de ca fe ba d0 fe ff"))
gomega.Eventually(ctx, pollLogs2, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("de ca fe ba d0 fe ff"))
})
/*
@@ -238,7 +238,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in an error reading that item(file).
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/configmap-volumes"
@@ -284,12 +284,12 @@ var _ = SIGDescribe("ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil {
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, deleteConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil {
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, updateConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
@@ -375,44 +375,44 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, deleteConfigMap.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, updateConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil {
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, createConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
})
/*
@@ -432,7 +432,7 @@ var _ = SIGDescribe("ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -486,7 +486,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
})
@@ -505,28 +505,28 @@ var _ = SIGDescribe("ConfigMap", func() {
name := "immutable"
configMap := newConfigMap(f, name)
currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create config map %q in namespace %q", configMap.Name, configMap.Namespace)
currentConfigMap.Data["data-4"] = "value-4"
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace)
// Mark config map as immutable.
trueVal := true
currentConfigMap.Immutable = &trueVal
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to mark config map %q in namespace %q as immutable", configMap.Name, configMap.Namespace)
// Ensure data can't be changed now.
currentConfigMap.Data["data-5"] = "value-5"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
if !apierrors.IsInvalid(err) {
framework.Failf("expected 'invalid' as error, got instead: %v", err)
}
// Ensure config map can't be switched from immutable to mutable.
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace)
if !*currentConfigMap.Immutable {
framework.Failf("currentConfigMap %s can be switched from immutable to mutable", currentConfigMap.Name)
@@ -534,20 +534,20 @@ var _ = SIGDescribe("ConfigMap", func() {
falseVal := false
currentConfigMap.Immutable = &falseVal
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
if !apierrors.IsInvalid(err) {
framework.Failf("expected 'invalid' as error, got instead: %v", err)
}
// Ensure that metadata can be changed.
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace)
currentConfigMap.Labels = map[string]string{"label1": "value1"}
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace)
// Ensure that immutable config map can be deleted.
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete config map %q in namespace %q", configMap.Name, configMap.Namespace)
})
@@ -556,7 +556,7 @@ var _ = SIGDescribe("ConfigMap", func() {
// Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPod(f, volumeMountPath)
pod, err := createNonOptionalConfigMapPod(ctx, f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
})
@@ -565,7 +565,7 @@ var _ = SIGDescribe("ConfigMap", func() {
// Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath)
pod, err := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
})
})
@@ -584,7 +584,7 @@ func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
}
}
func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) {
func doConfigMapE2EWithoutMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) {
groupID := int64(fsGroup)
var (
@@ -596,7 +596,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -622,10 +622,10 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup
"content of file \"/etc/configmap-volume/data-1\": value-1",
fileModeRegexp,
}
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
}
func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
func doConfigMapE2EWithMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
groupID := int64(fsGroup)
var (
@@ -638,7 +638,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -674,11 +674,11 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int
fileModeRegexp := getFileModeRegex("/etc/configmap-volume/path/to/data-2", itemMode)
output = append(output, fileModeRegexp)
}
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
}
func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
func createNonOptionalConfigMapPod(ctx context.Context, f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false
@@ -691,12 +691,12 @@ func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath strin
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Optional = &falseValue
ginkgo.By("Creating the pod")
pod = e2epod.NewPodClient(f).Create(pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
pod = e2epod.NewPodClient(f).Create(ctx, pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
}
func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
func createNonOptionalConfigMapPodWithConfig(ctx context.Context, f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false
@@ -706,7 +706,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
// creating a pod with configMap object, but with different key which is not present in configMap object.
@@ -721,8 +721,8 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
}
ginkgo.By("Creating the pod")
pod = e2epod.NewPodClient(f).Create(pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
pod = e2epod.NewPodClient(f).Create(ctx, pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
}
func createConfigMapVolumeMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {
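
The changes in this file all follow one mechanical pattern: every helper that talks to the API server gains a leading ctx context.Context parameter, and each context.TODO() is replaced with that ctx, so in-flight requests are cancelled the moment the test is aborted. A minimal before/after sketch of that shape (clientset, namespace and name are illustrative placeholders, not identifiers from this commit):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Before: the helper manufactures its own throwaway context, so a
// cancelled test keeps waiting for the Get call to finish.
func getConfigMapOld(clientset kubernetes.Interface, namespace, name string) error {
	_, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	return err
}

// After: the caller's (ultimately Ginkgo's) context is threaded in, so
// the request is cancelled together with the test.
func getConfigMap(ctx context.Context, clientset kubernetes.Interface, namespace, name string) error {
	_, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
	return err
}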

View File

@@ -62,7 +62,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag
fmt.Sprintf("EPHEMERAL_STORAGE_REQUEST=%d", 32*1024*1024),
}
testDownwardAPIForEphemeralStorage(f, podName, env, expectations)
testDownwardAPIForEphemeralStorage(ctx, f, podName, env, expectations)
})
ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func(ctx context.Context) {
@@ -98,13 +98,13 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
})
})
})
func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
func testDownwardAPIForEphemeralStorage(ctx context.Context, f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@@ -131,9 +131,9 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations)
func testDownwardAPIUsingPod(ctx context.Context, f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward api env vars", pod, 0, expectations)
}

View File

@@ -55,7 +55,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@@ -71,7 +71,7 @@ var _ = SIGDescribe("Downward API volume", func() {
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Downward API volume", func() {
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@@ -102,7 +102,7 @@ var _ = SIGDescribe("Downward API volume", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Downward API volume", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})
@@ -137,20 +137,20 @@ var _ = SIGDescribe("Downward API volume", func() {
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container"
ginkgo.By("Creating the pod")
podClient.CreateSync(pod)
podClient.CreateSync(ctx, pod)
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
//modify labels
podClient.Update(podName, func(pod *v1.Pod) {
podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
})
@@ -168,20 +168,20 @@ var _ = SIGDescribe("Downward API volume", func() {
containerName := "client-container"
ginkgo.By("Creating the pod")
pod = podClient.CreateSync(pod)
pod = podClient.CreateSync(ctx, pod)
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
//modify annotations
podClient.Update(podName, func(pod *v1.Pod) {
podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
})
@@ -195,7 +195,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
@@ -209,7 +209,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
@@ -237,7 +237,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
@@ -251,7 +251,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
/*
@@ -263,7 +263,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})
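
The gomega.Eventually calls above now take the context as their first argument; with a context, Gomega stops polling and fails promptly once that context is cancelled, instead of running the poll function until the full timeout after the test is already gone. A rough, self-contained sketch of that usage (pollOnce, the timeout and the interval are illustrative placeholders, not values from this commit):

package example

import (
	"context"
	"time"

	"github.com/onsi/gomega"
)

// pollOnce stands in for a poll function such as fetching pod logs.
func pollOnce() (string, error) {
	return "key1=\"value1\"\n", nil
}

func waitForLabel(ctx context.Context, g gomega.Gomega) {
	// Passing ctx first makes Eventually give up as soon as the test
	// context is cancelled, rather than polling to the full timeout.
	g.Eventually(ctx, pollOnce, 2*time.Minute, 2*time.Second).
		Should(gomega.ContainSubstring("key1=\"value1\""))
}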

View File

@@ -54,27 +54,27 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
})
ginkgo.It("new files should be created with FSGroup ownership when container is root", func(ctx context.Context) {
doTestSetgidFSGroup(f, 0, v1.StorageMediumMemory)
doTestSetgidFSGroup(ctx, f, 0, v1.StorageMediumMemory)
})
ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func(ctx context.Context) {
doTestSetgidFSGroup(f, nonRootUID, v1.StorageMediumMemory)
doTestSetgidFSGroup(ctx, f, nonRootUID, v1.StorageMediumMemory)
})
ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func(ctx context.Context) {
doTestSubPathFSGroup(f, nonRootUID, v1.StorageMediumMemory)
doTestSubPathFSGroup(ctx, f, nonRootUID, v1.StorageMediumMemory)
})
ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func(ctx context.Context) {
doTest0644FSGroup(f, 0, v1.StorageMediumMemory)
doTest0644FSGroup(ctx, f, 0, v1.StorageMediumMemory)
})
ginkgo.It("volume on default medium should have the correct mode using FSGroup", func(ctx context.Context) {
doTestVolumeModeFSGroup(f, 0, v1.StorageMediumDefault)
doTestVolumeModeFSGroup(ctx, f, 0, v1.StorageMediumDefault)
})
ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func(ctx context.Context) {
doTestVolumeModeFSGroup(f, 0, v1.StorageMediumMemory)
doTestVolumeModeFSGroup(ctx, f, 0, v1.StorageMediumMemory)
})
})
@@ -85,7 +85,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or the medium = 'Memory'.
*/
framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTestVolumeMode(f, 0, v1.StorageMediumMemory)
doTestVolumeMode(ctx, f, 0, v1.StorageMediumMemory)
})
/*
@@ -95,7 +95,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, 0, v1.StorageMediumMemory)
doTest0644(ctx, f, 0, v1.StorageMediumMemory)
})
/*
@@ -105,7 +105,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, 0, v1.StorageMediumMemory)
doTest0666(ctx, f, 0, v1.StorageMediumMemory)
})
/*
@@ -115,7 +115,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, 0, v1.StorageMediumMemory)
doTest0777(ctx, f, 0, v1.StorageMediumMemory)
})
/*
@@ -125,7 +125,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, nonRootUID, v1.StorageMediumMemory)
doTest0644(ctx, f, nonRootUID, v1.StorageMediumMemory)
})
/*
@@ -135,7 +135,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, nonRootUID, v1.StorageMediumMemory)
doTest0666(ctx, f, nonRootUID, v1.StorageMediumMemory)
})
/*
@@ -145,7 +145,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, nonRootUID, v1.StorageMediumMemory)
doTest0777(ctx, f, nonRootUID, v1.StorageMediumMemory)
})
/*
@@ -155,7 +155,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTestVolumeMode(f, 0, v1.StorageMediumDefault)
doTestVolumeMode(ctx, f, 0, v1.StorageMediumDefault)
})
/*
@@ -165,7 +165,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, 0, v1.StorageMediumDefault)
doTest0644(ctx, f, 0, v1.StorageMediumDefault)
})
/*
@@ -175,7 +175,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, 0, v1.StorageMediumDefault)
doTest0666(ctx, f, 0, v1.StorageMediumDefault)
})
/*
@@ -185,7 +185,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, 0, v1.StorageMediumDefault)
doTest0777(ctx, f, 0, v1.StorageMediumDefault)
})
/*
@@ -195,7 +195,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, nonRootUID, v1.StorageMediumDefault)
doTest0644(ctx, f, nonRootUID, v1.StorageMediumDefault)
})
/*
@@ -205,7 +205,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, nonRootUID, v1.StorageMediumDefault)
doTest0666(ctx, f, nonRootUID, v1.StorageMediumDefault)
})
/*
@@ -215,7 +215,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, nonRootUID, v1.StorageMediumDefault)
doTest0777(ctx, f, nonRootUID, v1.StorageMediumDefault)
})
/*
@@ -283,8 +283,8 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
}
ginkgo.By("Creating Pod")
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
e2epod.NewPodClient(f).Create(ctx, pod)
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
ginkgo.By("Reading file content from the nginx-container")
result := e2epod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath))
@@ -343,14 +343,14 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
var err error
ginkgo.By("Creating Pod")
pod = e2epod.NewPodClient(f).CreateSync(pod)
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
ginkgo.By("Waiting for the pod running")
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name)
ginkgo.By("Getting the pod")
pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %s", pod.Name)
ginkgo.By("Reading empty dir size")
@@ -364,7 +364,7 @@ const (
volumeName = "test-volume"
)
func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
func doTestSetgidFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -391,10 +391,10 @@ func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMed
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}
func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
func doTestSubPathFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var (
subPath = "test-sub"
source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -424,10 +424,10 @@ func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMe
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}
func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
func doTestVolumeModeFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var (
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(uid, volumePath, source)
@@ -449,10 +449,10 @@ func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.Storag
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}
func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
func doTest0644FSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -477,10 +477,10 @@ func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMediu
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}
func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium) {
func doTestVolumeMode(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var (
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(uid, volumePath, source)
@@ -499,10 +499,10 @@ func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}
func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) {
func doTest0644(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -524,10 +524,10 @@ func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}
func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) {
func doTest0666(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -549,10 +549,10 @@ func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}
func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) {
func doTest0777(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -574,7 +574,7 @@ func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}
func formatMedium(medium v1.StorageMedium) string {
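
All of these ctx values originate in the spec body itself: Ginkgo hands a context to the func(ctx context.Context) callback and cancels it when the spec is interrupted, aborted, or times out, which is what lets the doTest* helpers above return early. A minimal sketch of that wiring (runCheck is an illustrative stand-in, not a helper from this commit):

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// runCheck stands in for a helper such as doTest0644: it only does
// context-aware work, so it stops once ctx is cancelled.
func runCheck(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

var _ = ginkgo.Describe("example", func() {
	// Ginkgo supplies ctx and cancels it when the spec is aborted,
	// interrupted, or exceeds its timeout; helpers just pass it along.
	ginkgo.It("threads the spec context into helpers", func(ctx context.Context) {
		_ = runCheck(ctx)
	})
})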

View File

@@ -60,7 +60,7 @@ var _ = SIGDescribe("HostPath", func() {
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_mode=%v", volumePath),
}
e2epodoutput.TestContainerOutputRegexp(f, "hostPath mode", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(ctx, f, "hostPath mode", pod, 0, []string{
"mode of file \"/test-volume\": dg?trwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
})
})
@@ -89,7 +89,7 @@ var _ = SIGDescribe("HostPath", func() {
}
//Read the content of the file with the second container to
//verify volumes being shared properly among containers within the pod.
e2epodoutput.TestContainerOutput(f, "hostPath r/w", pod, 1, []string{
e2epodoutput.TestContainerOutput(ctx, f, "hostPath r/w", pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file",
})
})
@@ -126,7 +126,7 @@ var _ = SIGDescribe("HostPath", func() {
fmt.Sprintf("--retry_time=%d", retryDuration),
}
e2epodoutput.TestContainerOutput(f, "hostPath subPath", pod, 1, []string{
e2epodoutput.TestContainerOutput(ctx, f, "hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})

View File

@@ -66,11 +66,11 @@ var _ = SIGDescribe("Projected combined", func() {
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -89,7 +89,7 @@ var _ = SIGDescribe("Projected combined", func() {
},
},
}
e2epodoutput.TestContainerOutput(f, "Check all projections for projected volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "Check all projections for projected volume plugin", pod, 0, []string{
podName,
"secret-value-1",
"configmap-value-1",

View File

@@ -45,7 +45,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithoutMappings(f, false, 0, nil)
doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, nil)
})
/*
@@ -56,14 +56,14 @@ var _ = SIGDescribe("Projected configMap", func() {
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400)
doProjectedConfigMapE2EWithoutMappings(f, false, 0, &defaultMode)
doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode)
})
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
e2eskipper.SkipIfNodeOSDistroIs("windows")
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doProjectedConfigMapE2EWithoutMappings(f, true, 1001, &defaultMode)
doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, &defaultMode)
})
/*
@@ -72,13 +72,13 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithoutMappings(f, true, 0, nil)
doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 0, nil)
})
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
doProjectedConfigMapE2EWithoutMappings(f, true, 1001, nil)
doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil)
})
/*
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithMappings(f, false, 0, nil)
doProjectedConfigMapE2EWithMappings(ctx, f, false, 0, nil)
})
/*
@@ -98,7 +98,7 @@ var _ = SIGDescribe("Projected configMap", func() {
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400)
doProjectedConfigMapE2EWithMappings(f, false, 0, &mode)
doProjectedConfigMapE2EWithMappings(ctx, f, false, 0, &mode)
})
/*
@@ -107,13 +107,13 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithMappings(f, true, 0, nil)
doProjectedConfigMapE2EWithMappings(ctx, f, true, 0, nil)
})
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
doProjectedConfigMapE2EWithMappings(f, true, 1001, nil)
doProjectedConfigMapE2EWithMappings(ctx, f, true, 1001, nil)
})
/*
@@ -122,7 +122,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap and performs a create and update to new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the configMap to value-2.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "projected-configmap-test-upd-" + string(uuid.NewUUID())
@@ -140,7 +140,7 @@ var _ = SIGDescribe("Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -148,22 +148,22 @@ var _ = SIGDescribe("Projected configMap", func() {
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1")
ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
}
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
})
/*
@@ -172,7 +172,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: Create a Pod with three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have configMap, update and delete containers MUST be created with a ConfigMap value as 'value-1'. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/projected-configmap-volumes"
@@ -218,12 +218,12 @@ var _ = SIGDescribe("Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil {
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, deleteConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil {
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, updateConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
@@ -327,44 +327,44 @@ var _ = SIGDescribe("Projected configMap", func() {
},
}
ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))
gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))
gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, deleteConfigMap.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, updateConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil {
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, createConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1"))
gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1"))
})
/*
@@ -384,7 +384,7 @@ var _ = SIGDescribe("Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -451,7 +451,7 @@ var _ = SIGDescribe("Projected configMap", func() {
},
}
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
})
@@ -462,7 +462,7 @@ var _ = SIGDescribe("Projected configMap", func() {
//Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/projected-configmap-volumes"
pod, err := createNonOptionalConfigMapPod(f, volumeMountPath)
pod, err := createNonOptionalConfigMapPod(ctx, f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
})
@@ -471,12 +471,12 @@ var _ = SIGDescribe("Projected configMap", func() {
//Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath)
pod, err := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
})
})
func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) {
func doProjectedConfigMapE2EWithoutMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) {
groupID := int64(fsGroup)
var (
@@ -488,7 +488,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool,
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -513,10 +513,10 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool,
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
fileModeRegexp,
}
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
}
func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
func doProjectedConfigMapE2EWithMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
groupID := int64(fsGroup)
var (
@@ -529,7 +529,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -564,7 +564,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs
fileModeRegexp := getFileModeRegex("/etc/projected-configmap-volume/path/to/data-2", itemMode)
output = append(output, fileModeRegexp)
}
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
}
func createProjectedConfigMapMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {
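
The waiting and timeout helpers need the context for the same reason: a poll loop that ignores it keeps running to its deadline even after the test has been torn down. A hand-rolled sketch of a context-aware poll loop using only the standard library (an illustration of the idea, not the framework's own implementation):

package example

import (
	"context"
	"fmt"
	"time"
)

// pollUntil calls cond repeatedly until it returns true, an error, the
// timeout elapses, or ctx is cancelled, whichever happens first.
func pollUntil(ctx context.Context, interval, timeout time.Duration, cond func(context.Context) (bool, error)) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		done, err := cond(ctx)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			// Covers both the per-call timeout and an aborted test.
			return fmt.Errorf("condition not met: %w", ctx.Err())
		case <-ticker.C:
		}
	}
}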

View File

@@ -55,7 +55,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@@ -71,7 +71,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
defaultMode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
mode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@@ -102,7 +102,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})
@@ -137,20 +137,20 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container"
ginkgo.By("Creating the pod")
podClient.CreateSync(pod)
podClient.CreateSync(ctx, pod)
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
//modify labels
podClient.Update(podName, func(pod *v1.Pod) {
podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
})
@@ -168,20 +168,20 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
containerName := "client-container"
ginkgo.By("Creating the pod")
pod = podClient.CreateSync(pod)
pod = podClient.CreateSync(ctx, pod)
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
//modify annotations
podClient.Update(podName, func(pod *v1.Pod) {
podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
})
@@ -195,7 +195,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
@@ -209,7 +209,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
@@ -237,7 +237,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
@@ -251,7 +251,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
/*
@@ -263,7 +263,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})

View File

@@ -44,7 +44,7 @@ var _ = SIGDescribe("Projected secret", func() {
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
doProjectedSecretE2EWithoutMapping(ctx, f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
@@ -55,7 +55,7 @@ var _ = SIGDescribe("Projected secret", func() {
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400)
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
doProjectedSecretE2EWithoutMapping(ctx, f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
@@ -67,7 +67,7 @@ var _ = SIGDescribe("Projected secret", func() {
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001)
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
doProjectedSecretE2EWithoutMapping(ctx, f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
})
/*
@@ -76,7 +76,7 @@ var _ = SIGDescribe("Projected secret", func() {
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doProjectedSecretE2EWithMapping(f, nil)
doProjectedSecretE2EWithMapping(ctx, f, nil)
})
/*
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected secret", func() {
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400)
doProjectedSecretE2EWithMapping(f, &mode)
doProjectedSecretE2EWithMapping(ctx, f, &mode)
})
ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) {
@@ -97,7 +97,7 @@ var _ = SIGDescribe("Projected secret", func() {
secret2Name = "projected-secret-test-" + string(uuid.NewUUID())
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
if namespace2, err = f.CreateNamespace(ctx, "secret-namespace", nil); err != nil {
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
@@ -105,10 +105,10 @@ var _ = SIGDescribe("Projected secret", func() {
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil {
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(ctx, secret2, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
doProjectedSecretE2EWithoutMapping(ctx, f, nil /* default mode */, secret2.Name, nil, nil)
})
/*
@@ -131,7 +131,7 @@ var _ = SIGDescribe("Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -201,7 +201,7 @@ var _ = SIGDescribe("Projected secret", func() {
}
fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/data-1", nil)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, []string{
"content of file \"/etc/projected-secret-volume/data-1\": value-1",
fileModeRegexp,
})
@@ -213,7 +213,7 @@ var _ = SIGDescribe("Projected secret", func() {
Description: Create a Pod with three containers with secrets, namely a create, update and delete container. The create container, when started, MUST not have a secret; the update and delete containers MUST be created with a secret value. Create a secret in the create container; the Pod MUST be able to read the secret from the create container. Update the secret in the update container; the Pod MUST be able to read the updated secret value. Delete the secret in the delete container; the Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/projected-secret-volumes"
@@ -259,12 +259,12 @@ var _ = SIGDescribe("Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil {
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, deleteSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil {
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, updateSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
@@ -368,44 +368,44 @@ var _ = SIGDescribe("Projected secret", func() {
},
}
ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1"))
gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3"))
gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, deleteSecret.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, updateSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil {
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, createSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1"))
gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1"))
})
// The pod stays pending during volume creation until the secret objects are available
@@ -414,7 +414,7 @@ var _ = SIGDescribe("Projected secret", func() {
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/projected-secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
err := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName)
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
@@ -424,12 +424,12 @@ var _ = SIGDescribe("Projected secret", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)
err := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName)
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
})
func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32,
func doProjectedSecretE2EWithoutMapping(ctx context.Context, f *framework.Framework, defaultMode *int32,
secretName string, fsGroup *int64, uid *int64) {
var (
volumeName = "projected-secret-volume"
@@ -439,7 +439,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -505,10 +505,10 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
fileModeRegexp,
}
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
}
func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
func doProjectedSecretE2EWithMapping(ctx context.Context, f *framework.Framework, mode *int32) {
var (
name = "projected-secret-test-map-" + string(uuid.NewUUID())
volumeName = "projected-secret-volume"
@@ -518,7 +518,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -582,5 +582,5 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
fileModeRegexp,
}
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
}
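The "optional updates should be reflected in volume" spec above polls container logs; after the change both the log fetch and the gomega.Eventually poll receive the spec's ctx, so polling stops as soon as the test is aborted instead of running out podLogTimeout. A compressed sketch of that pattern (waitForLogSubstring is an illustrative wrapper, not a helper from this commit):

package example

import (
	"context"

	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForLogSubstring polls the logs of one container until they contain want.
// Passing ctx both to GetPodLogs and as the first argument of Eventually ties
// the poll loop to the lifetime of the Ginkgo spec.
func waitForLogSubstring(ctx context.Context, f *framework.Framework, podName, containerName, want string) {
	podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
	pollLogs := func() (string, error) {
		return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName)
	}
	gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring(want))
}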

View File

@@ -45,7 +45,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
doSecretE2EWithoutMapping(ctx, f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
@@ -56,7 +56,7 @@ var _ = SIGDescribe("Secrets", func() {
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
doSecretE2EWithoutMapping(ctx, f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
@@ -68,7 +68,7 @@ var _ = SIGDescribe("Secrets", func() {
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
doSecretE2EWithoutMapping(ctx, f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
})
/*
@@ -77,7 +77,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doSecretE2EWithMapping(f, nil)
doSecretE2EWithMapping(ctx, f, nil)
})
/*
@@ -88,7 +88,7 @@ var _ = SIGDescribe("Secrets", func() {
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400)
doSecretE2EWithMapping(f, &mode)
doSecretE2EWithMapping(ctx, f, &mode)
})
/*
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Secrets", func() {
secret2Name = "secret-test-" + string(uuid.NewUUID())
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
if namespace2, err = f.CreateNamespace(ctx, "secret-namespace", nil); err != nil {
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
@@ -111,10 +111,10 @@ var _ = SIGDescribe("Secrets", func() {
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil {
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(ctx, secret2, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
doSecretE2EWithoutMapping(ctx, f, nil /* default mode */, secret2.Name, nil, nil)
})
/*
@@ -137,7 +137,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -191,7 +191,7 @@ var _ = SIGDescribe("Secrets", func() {
}
fileModeRegexp := getFileModeRegex("/etc/secret-volume/data-1", nil)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
fileModeRegexp,
})
@@ -203,7 +203,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a Pod with three containers with secrets volume sources, namely a create, update and delete container. The create container, when started, MUST not have a secret; the update and delete containers MUST be created with a secret value. Create a secret in the create container; the Pod MUST be able to read the secret from the create container. Update the secret in the update container; the Pod MUST be able to read the updated secret value. Delete the secret in the delete container; the Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/secret-volumes"
@@ -249,12 +249,12 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil {
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, deleteSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil {
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, updateSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
@@ -334,44 +334,44 @@ var _ = SIGDescribe("Secrets", func() {
},
}
ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))
gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))
gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, deleteSecret.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, updateSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil {
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, createSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
})
/*
@@ -387,28 +387,28 @@ var _ = SIGDescribe("Secrets", func() {
name := "immutable"
secret := secretForTest(f.Namespace.Name, name)
currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create secret %q in namespace %q", secret.Name, secret.Namespace)
currentSecret.Data["data-4"] = []byte("value-4\n")
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace)
// Mark secret as immutable.
trueVal := true
currentSecret.Immutable = &trueVal
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to mark secret %q in namespace %q as immutable", secret.Name, secret.Namespace)
// Ensure data can't be changed now.
currentSecret.Data["data-5"] = []byte("value-5\n")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
if !apierrors.IsInvalid(err) {
framework.Failf("expected 'invalid' as error, got instead: %v", err)
}
// Ensure secret can't be switched from immutable to mutable.
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace)
if !*currentSecret.Immutable {
framework.Failf("currentSecret %s can be switched from immutable to mutable", currentSecret.Name)
@@ -416,20 +416,20 @@ var _ = SIGDescribe("Secrets", func() {
falseVal := false
currentSecret.Immutable = &falseVal
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
if !apierrors.IsInvalid(err) {
framework.Failf("expected 'invalid' as error, got instead: %v", err)
}
// Ensure that metadata can be changed.
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace)
currentSecret.Labels = map[string]string{"label1": "value1"}
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace)
// Ensure that immutable secret can be deleted.
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", secret.Name, secret.Namespace)
})
@@ -439,7 +439,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
err := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName)
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
@@ -449,7 +449,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)
err := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName)
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
})
@@ -468,7 +468,7 @@ func secretForTest(namespace, name string) *v1.Secret {
}
}
func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string,
func doSecretE2EWithoutMapping(ctx context.Context, f *framework.Framework, defaultMode *int32, secretName string,
fsGroup *int64, uid *int64) {
var (
volumeName = "secret-volume"
@@ -478,7 +478,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -535,10 +535,10 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
fileModeRegexp,
}
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
}
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
func doSecretE2EWithMapping(ctx context.Context, f *framework.Framework, mode *int32) {
var (
name = "secret-test-map-" + string(uuid.NewUUID())
volumeName = "secret-volume"
@@ -548,7 +548,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -603,11 +603,11 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
fileModeRegexp,
}
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
}
func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
func createNonOptionalSecretPod(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false
@@ -650,12 +650,12 @@ func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName
},
}
ginkgo.By("Creating the pod")
pod = e2epod.NewPodClient(f).Create(pod)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
pod = e2epod.NewPodClient(f).Create(ctx, pod)
return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
}
func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
func createNonOptionalSecretPodWithSecret(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false
@@ -667,7 +667,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
// creating a pod with secret object, with the key which is not present in secret object.
@@ -711,6 +711,6 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
},
}
ginkgo.By("Creating the pod")
pod = e2epod.NewPodClient(f).Create(pod)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
pod = e2epod.NewPodClient(f).Create(ctx, pod)
return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
}
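createNonOptionalSecretPod and createNonOptionalSecretPodWithSecret end with the same two calls; both the synchronous pod creation and the running-state wait now take ctx. A minimal sketch of that shared tail (startPodAndWait is an illustrative name; the pod spec would come from the surrounding helper):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// startPodAndWait creates the pod through the ctx-aware pod client and then
// waits for it to reach Running; both steps stop as soon as the spec's ctx is
// cancelled, so an aborted test no longer blocks on the wait.
func startPodAndWait(ctx context.Context, f *framework.Framework, pod *v1.Pod) error {
	pod = e2epod.NewPodClient(f).Create(ctx, pod)
	return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
}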

View File

@@ -77,7 +77,7 @@ var _ = SIGDescribe("Volumes", func() {
////////////////////////////////////////////////////////////////////////
ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
config, _, serverHost := e2evolume.NewNFSServer(ctx, c, namespace.Name, []string{})
ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)
tests := []e2evolume.Test{
@@ -95,13 +95,13 @@ var _ = SIGDescribe("Volumes", func() {
}
// Must match content of test/images/volumes-tester/nfs/index.html
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests)
})
})
ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
config, _, serverHost := e2evolume.NewNFSServer(ctx, c, namespace.Name, []string{})
ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)
tests := []e2evolume.Test{
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Volumes", func() {
},
}
// Must match content of test/images/volume-tester/nfs/index.html
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests)
})
})
})
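The NFS specs apply the same rule to fixtures: the server pod is started with the spec's ctx and torn down via ginkgo.DeferCleanup, which still runs when the body is interrupted. An abridged sketch under those assumptions (the e2evolume.Test entries are left empty here; the real specs fill them in to match the NFS test image):

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

var _ = ginkgo.Describe("NFS example", func() {
	f := framework.NewDefaultFramework("nfs-example")

	ginkgo.It("should be mountable", func(ctx context.Context) {
		// Start the NFS server pod with the spec's context and register
		// cleanup right away, so the server is removed even on abort.
		config, _, _ := e2evolume.NewNFSServer(ctx, f.ClientSet, f.Namespace.Name, []string{})
		ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)

		// The real specs populate Test entries whose expected content must
		// match what the NFS test image serves; they are omitted here.
		var tests []e2evolume.Test
		e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests)
	})
})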