From 2f6c4f5eab85d3f15cd80d21f4a0c353a8ceb10b Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 12 Dec 2022 10:11:10 +0100 Subject: [PATCH] e2e: use Ginkgo context All code must use the context from Ginkgo when doing API calls or polling for a change, otherwise the code would not return immediately when the test gets aborted. --- test/e2e/apimachinery/aggregator.go | 96 +- test/e2e/apimachinery/apiserver_identity.go | 12 +- test/e2e/apimachinery/apply.go | 76 +- test/e2e/apimachinery/chunking.go | 18 +- .../apimachinery/crd_conversion_webhook.go | 62 +- test/e2e/apimachinery/crd_publish_openapi.go | 40 +- test/e2e/apimachinery/crd_validation_rules.go | 10 +- test/e2e/apimachinery/crd_watch.go | 32 +- .../custom_resource_definition.go | 28 +- test/e2e/apimachinery/discovery.go | 4 +- test/e2e/apimachinery/etcd_failure.go | 36 +- test/e2e/apimachinery/flowcontrol.go | 42 +- test/e2e/apimachinery/garbage_collector.go | 232 ++--- test/e2e/apimachinery/generated_clientset.go | 24 +- test/e2e/apimachinery/health_handlers.go | 10 +- test/e2e/apimachinery/namespace.go | 90 +- test/e2e/apimachinery/resource_quota.go | 496 +++++------ test/e2e/apimachinery/storage_version.go | 6 +- test/e2e/apimachinery/table_conversion.go | 16 +- test/e2e/apimachinery/watch.go | 74 +- test/e2e/apimachinery/webhook.go | 401 ++++----- test/e2e/apps/controller_revision.go | 66 +- test/e2e/apps/cronjob.go | 198 ++--- test/e2e/apps/daemon_restart.go | 58 +- test/e2e/apps/daemon_set.go | 300 +++---- test/e2e/apps/deployment.go | 312 +++---- test/e2e/apps/disruption.go | 198 ++--- test/e2e/apps/job.go | 168 ++-- test/e2e/apps/rc.go | 132 +-- test/e2e/apps/replica_set.go | 128 +-- test/e2e/apps/statefulset.go | 504 +++++------ test/e2e/apps/ttl_after_finished.go | 34 +- test/e2e/apps/wait.go | 22 +- test/e2e/architecture/conformance.go | 4 +- test/e2e/auth/certificates.go | 62 +- test/e2e/auth/node_authn.go | 14 +- test/e2e/auth/node_authz.go | 28 +- test/e2e/auth/selfsubjectreviews.go | 4 +- test/e2e/auth/service_accounts.go | 66 +- test/e2e/autoscaling/autoscaling_timer.go | 20 +- .../cluster_autoscaler_scalability.go | 106 +-- .../autoscaling/cluster_size_autoscaling.go | 461 +++++----- .../custom_metrics_stackdriver_autoscaling.go | 61 +- test/e2e/autoscaling/dns_autoscaling.go | 102 +-- .../autoscaling/horizontal_pod_autoscaling.go | 86 +- .../horizontal_pod_autoscaling_behavior.go | 88 +- test/e2e/chaosmonkey/chaosmonkey.go | 13 +- test/e2e/chaosmonkey/chaosmonkey_test.go | 11 +- test/e2e/cloud/gcp/addon_update.go | 56 +- test/e2e/cloud/gcp/apps/stateful_apps.go | 4 +- ..._account_admission_controller_migration.go | 2 +- test/e2e/cloud/gcp/cluster_upgrade.go | 6 +- .../e2e/cloud/gcp/common/upgrade_mechanics.go | 61 +- test/e2e/cloud/gcp/gke_node_pools.go | 12 +- test/e2e/cloud/gcp/ha_master.go | 48 +- test/e2e/cloud/gcp/kubelet_security.go | 8 +- .../cloud/gcp/network/kube_proxy_migration.go | 4 +- test/e2e/cloud/gcp/node/gpu.go | 6 +- test/e2e/cloud/gcp/node_lease.go | 30 +- test/e2e/cloud/gcp/reboot.go | 50 +- test/e2e/cloud/gcp/recreate_node.go | 24 +- test/e2e/cloud/gcp/resize_nodes.go | 32 +- test/e2e/cloud/gcp/restart.go | 18 +- test/e2e/cloud/nodes.go | 8 +- test/e2e/common/network/networking.go | 30 +- test/e2e/common/node/configmap.go | 32 +- test/e2e/common/node/container.go | 24 +- test/e2e/common/node/container_probe.go | 117 +-- test/e2e/common/node/containers.go | 14 +- test/e2e/common/node/downwardapi.go | 24 +- test/e2e/common/node/ephemeral_containers.go | 6 +- test/e2e/common/node/expansion.go 
| 42 +- test/e2e/common/node/init_container.go | 16 +- test/e2e/common/node/kubelet.go | 28 +- test/e2e/common/node/kubelet_etc_hosts.go | 16 +- test/e2e/common/node/lease.go | 26 +- test/e2e/common/node/lifecycle_hook.go | 32 +- test/e2e/common/node/node_lease.go | 32 +- test/e2e/common/node/pod_admission.go | 10 +- test/e2e/common/node/pods.go | 157 ++-- test/e2e/common/node/podtemplates.go | 32 +- test/e2e/common/node/privileged.go | 6 +- test/e2e/common/node/runtime.go | 60 +- test/e2e/common/node/runtimeclass.go | 82 +- test/e2e/common/node/secrets.go | 26 +- test/e2e/common/node/security_context.go | 100 +-- test/e2e/common/node/sysctl.go | 26 +- test/e2e/common/storage/configmap_volume.go | 136 +-- test/e2e/common/storage/downwardapi.go | 12 +- test/e2e/common/storage/downwardapi_volume.go | 46 +- test/e2e/common/storage/empty_dir.go | 82 +- test/e2e/common/storage/host_path.go | 6 +- test/e2e/common/storage/projected_combined.go | 6 +- .../e2e/common/storage/projected_configmap.go | 84 +- .../common/storage/projected_downwardapi.go | 46 +- test/e2e/common/storage/projected_secret.go | 68 +- test/e2e/common/storage/secrets_volume.go | 104 +-- test/e2e/common/storage/volumes.go | 8 +- test/e2e/dra/deploy.go | 8 +- test/e2e/dra/dra.go | 46 +- test/e2e/e2e.go | 60 +- test/e2e/framework/auth/helpers.go | 34 +- .../autoscaling/autoscaling_utils.go | 201 +++-- test/e2e/framework/daemonset/fixtures.go | 30 +- test/e2e/framework/debug/dump.go | 33 +- test/e2e/framework/debug/init/init.go | 26 +- .../framework/debug/log_size_monitoring.go | 18 +- .../debug/resource_usage_gatherer.go | 44 +- test/e2e/framework/deployment/fixtures.go | 10 +- test/e2e/framework/events/events.go | 10 +- test/e2e/framework/framework.go | 56 +- test/e2e/framework/ingress/ingress_utils.go | 200 ++--- test/e2e/framework/job/rest.go | 20 +- test/e2e/framework/job/wait.go | 38 +- test/e2e/framework/kubectl/kubectl_utils.go | 12 +- test/e2e/framework/kubelet/config.go | 9 +- test/e2e/framework/kubelet/kubelet_pods.go | 14 +- test/e2e/framework/kubelet/stats.go | 55 +- test/e2e/framework/kubesystem/kubesystem.go | 11 +- test/e2e/framework/manifest/manifest.go | 10 +- .../framework/metrics/api_server_metrics.go | 4 +- test/e2e/framework/metrics/grab.go | 14 +- test/e2e/framework/metrics/init/init.go | 6 +- test/e2e/framework/metrics/kubelet_metrics.go | 28 +- test/e2e/framework/metrics/metrics_grabber.go | 80 +- test/e2e/framework/network/utils.go | 170 ++-- test/e2e/framework/node/helper.go | 29 +- test/e2e/framework/node/init/init.go | 5 +- test/e2e/framework/node/node_killer.go | 25 +- test/e2e/framework/node/resource.go | 88 +- test/e2e/framework/node/ssh.go | 5 +- test/e2e/framework/node/wait.go | 44 +- test/e2e/framework/node/wait_test.go | 5 +- test/e2e/framework/pod/create.go | 32 +- test/e2e/framework/pod/delete.go | 26 +- test/e2e/framework/pod/exec_util.go | 17 +- test/e2e/framework/pod/output/output.go | 29 +- test/e2e/framework/pod/pod_client.go | 60 +- test/e2e/framework/pod/resource.go | 102 +-- test/e2e/framework/pod/resource_test.go | 3 +- test/e2e/framework/pod/wait.go | 132 +-- test/e2e/framework/pod/wait_test.go | 4 +- test/e2e/framework/provider.go | 17 +- test/e2e/framework/providers/aws/aws.go | 7 +- test/e2e/framework/providers/gce/firewall.go | 7 +- test/e2e/framework/providers/gce/gce.go | 24 +- test/e2e/framework/providers/gce/ingress.go | 26 +- test/e2e/framework/providers/gce/util.go | 6 +- test/e2e/framework/providers/gcp.go | 5 +- test/e2e/framework/pv/pv.go | 157 ++-- 
test/e2e/framework/rc/rc_utils.go | 14 +- test/e2e/framework/replicaset/wait.go | 16 +- test/e2e/framework/resource/resources.go | 43 +- test/e2e/framework/resource/runtimeobj.go | 12 +- test/e2e/framework/security/apparmor.go | 36 +- test/e2e/framework/service/jig.go | 216 ++--- test/e2e/framework/service/resource.go | 28 +- test/e2e/framework/service/util.go | 11 +- test/e2e/framework/service/wait.go | 14 +- test/e2e/framework/skipper/skipper.go | 18 +- test/e2e/framework/skipper/skipper_test.go | 4 +- test/e2e/framework/ssh/ssh.go | 40 +- test/e2e/framework/statefulset/fixtures.go | 5 +- test/e2e/framework/statefulset/rest.go | 70 +- test/e2e/framework/statefulset/wait.go | 56 +- test/e2e/framework/test_context.go | 7 +- test/e2e/framework/util.go | 76 +- test/e2e/framework/volume/fixtures.go | 84 +- test/e2e/instrumentation/core_events.go | 32 +- test/e2e/instrumentation/events.go | 38 +- .../instrumentation/logging/generic_soak.go | 9 +- .../instrumentation/monitoring/accelerator.go | 9 +- .../monitoring/custom_metrics_stackdriver.go | 32 +- .../monitoring/metrics_grabber.go | 16 +- .../instrumentation/monitoring/stackdriver.go | 9 +- .../monitoring/stackdriver_metadata_agent.go | 8 +- test/e2e/kubectl/kubectl.go | 132 +-- test/e2e/kubectl/portforward.go | 74 +- .../lifecycle/bootstrap/bootstrap_signer.go | 20 +- .../bootstrap/bootstrap_token_cleaner.go | 8 +- test/e2e/network/conntrack.go | 84 +- test/e2e/network/dns.go | 74 +- test/e2e/network/dns_common.go | 96 +- test/e2e/network/dns_configmap.go | 70 +- test/e2e/network/dns_scale_records.go | 16 +- test/e2e/network/dual_stack.go | 134 +-- test/e2e/network/endpointslice.go | 104 +-- test/e2e/network/endpointslicemirroring.go | 20 +- test/e2e/network/example_cluster_dns.go | 22 +- test/e2e/network/firewall.go | 34 +- test/e2e/network/funny_ips.go | 12 +- test/e2e/network/hostport.go | 16 +- test/e2e/network/ingress.go | 202 ++--- test/e2e/network/ingress_scale.go | 10 +- test/e2e/network/ingressclass.go | 66 +- test/e2e/network/kube_proxy.go | 10 +- test/e2e/network/loadbalancer.go | 350 ++++---- test/e2e/network/netpol/kubemanager.go | 40 +- test/e2e/network/netpol/network_legacy.go | 546 ++++++------ test/e2e/network/netpol/network_policy.go | 238 ++--- test/e2e/network/netpol/network_policy_api.go | 58 +- test/e2e/network/netpol/test_helper.go | 36 +- test/e2e/network/network_tiers.go | 26 +- test/e2e/network/networking.go | 192 ++-- test/e2e/network/networking_perf.go | 32 +- test/e2e/network/no_snat.go | 8 +- test/e2e/network/proxy.go | 40 +- test/e2e/network/scale/ingress.go | 48 +- .../network/scale/localrun/ingress_scale.go | 9 +- test/e2e/network/service.go | 830 +++++++++--------- test/e2e/network/service_latency.go | 23 +- test/e2e/network/topology_hints.go | 28 +- test/e2e/network/util.go | 15 +- test/e2e/node/apparmor.go | 12 +- test/e2e/node/crictl.go | 4 +- test/e2e/node/events.go | 12 +- test/e2e/node/examples.go | 14 +- test/e2e/node/kubelet.go | 72 +- test/e2e/node/kubelet_perf.go | 38 +- test/e2e/node/mount_propagation.go | 28 +- test/e2e/node/node_problem_detector.go | 66 +- test/e2e/node/pod_gc.go | 10 +- test/e2e/node/pods.go | 48 +- test/e2e/node/pre_stop.go | 28 +- test/e2e/node/runtimeclass.go | 32 +- test/e2e/node/security_context.go | 39 +- test/e2e/node/ssh.go | 6 +- test/e2e/node/taints.go | 94 +- test/e2e/scheduling/events.go | 13 +- test/e2e/scheduling/limit_range.go | 46 +- test/e2e/scheduling/nvidia-gpus.go | 82 +- test/e2e/scheduling/predicates.go | 226 ++--- test/e2e/scheduling/preemption.go | 154 
++-- test/e2e/scheduling/priorities.go | 83 +- test/e2e/scheduling/ubernetes_lite.go | 42 +- test/e2e/storage/csi_inline.go | 30 +- test/e2e/storage/csi_mock/base.go | 50 +- .../e2e/storage/csi_mock/csi_attach_volume.go | 24 +- .../e2e/storage/csi_mock/csi_fsgroup_mount.go | 6 +- .../storage/csi_mock/csi_fsgroup_policy.go | 6 +- .../csi_mock/csi_node_stage_error_cases.go | 30 +- .../e2e/storage/csi_mock/csi_selinux_mount.go | 6 +- .../csi_mock/csi_service_account_token.go | 10 +- test/e2e/storage/csi_mock/csi_snapshot.go | 66 +- .../storage/csi_mock/csi_storage_capacity.go | 20 +- .../storage/csi_mock/csi_volume_expansion.go | 54 +- test/e2e/storage/csi_mock/csi_volume_limit.go | 28 +- test/e2e/storage/csi_mock/csi_workload.go | 10 +- test/e2e/storage/csistoragecapacity.go | 34 +- test/e2e/storage/detach_mounted.go | 42 +- test/e2e/storage/drivers/csi.go | 73 +- test/e2e/storage/drivers/in_tree.go | 186 ++-- test/e2e/storage/empty_dir_wrapper.go | 48 +- test/e2e/storage/ephemeral_volume.go | 4 +- test/e2e/storage/external/external.go | 10 +- test/e2e/storage/flexvolume.go | 44 +- .../flexvolume_mounted_volume_resize.go | 34 +- test/e2e/storage/flexvolume_online_resize.go | 38 +- .../storage/framework/driver_operations.go | 5 +- .../storage/framework/snapshot_resource.go | 64 +- test/e2e/storage/framework/testdriver.go | 15 +- test/e2e/storage/framework/volume_resource.go | 48 +- .../generic_persistent_volume-disruptive.go | 20 +- test/e2e/storage/gke_local_ssd.go | 6 +- test/e2e/storage/host_path_type.go | 112 +-- test/e2e/storage/local_volume_resize.go | 38 +- test/e2e/storage/mounted_volume_resize.go | 28 +- .../nfs_persistent_volume-disruptive.go | 92 +- .../e2e/storage/non_graceful_node_shutdown.go | 32 +- test/e2e/storage/pd.go | 103 +-- test/e2e/storage/persistent_volumes-gce.go | 36 +- test/e2e/storage/persistent_volumes-local.go | 271 +++--- test/e2e/storage/persistent_volumes.go | 128 +-- test/e2e/storage/pv_protection.go | 30 +- test/e2e/storage/pvc_protection.go | 50 +- test/e2e/storage/pvc_storageclass.go | 28 +- test/e2e/storage/regional_pd.go | 84 +- test/e2e/storage/subpath.go | 18 +- test/e2e/storage/testsuites/base.go | 24 +- test/e2e/storage/testsuites/capacity.go | 23 +- test/e2e/storage/testsuites/disruptive.go | 46 +- test/e2e/storage/testsuites/ephemeral.go | 40 +- .../storage/testsuites/fsgroupchangepolicy.go | 24 +- test/e2e/storage/testsuites/multivolume.go | 126 +-- test/e2e/storage/testsuites/provisioning.go | 132 +-- .../storage/testsuites/readwriteoncepod.go | 50 +- test/e2e/storage/testsuites/snapshottable.go | 69 +- .../testsuites/snapshottable_stress.go | 31 +- test/e2e/storage/testsuites/subpath.go | 178 ++-- test/e2e/storage/testsuites/topology.go | 44 +- test/e2e/storage/testsuites/volume_expand.go | 78 +- test/e2e/storage/testsuites/volume_io.go | 26 +- test/e2e/storage/testsuites/volume_stress.go | 28 +- test/e2e/storage/testsuites/volumelimits.go | 50 +- test/e2e/storage/testsuites/volumemode.go | 88 +- test/e2e/storage/testsuites/volumeperf.go | 24 +- test/e2e/storage/testsuites/volumes.go | 29 +- test/e2e/storage/ubernetes_lite_volumes.go | 32 +- test/e2e/storage/utils/create.go | 122 +-- test/e2e/storage/utils/host_exec.go | 34 +- test/e2e/storage/utils/local.go | 141 +-- test/e2e/storage/utils/pod.go | 29 +- test/e2e/storage/utils/snapshot.go | 22 +- test/e2e/storage/utils/utils.go | 127 +-- test/e2e/storage/volume_metrics.go | 208 ++--- test/e2e/storage/volume_provisioning.go | 138 +-- test/e2e/storage/volumes.go | 6 +- 
.../vsphere/persistent_volumes-vsphere.go | 39 +- test/e2e/storage/vsphere/pv_reclaimpolicy.go | 74 +- .../e2e/storage/vsphere/pvc_label_selector.go | 38 +- test/e2e/storage/vsphere/vsphere_scale.go | 26 +- .../storage/vsphere/vsphere_statefulsets.go | 40 +- test/e2e/storage/vsphere/vsphere_stress.go | 36 +- test/e2e/storage/vsphere/vsphere_utils.go | 100 +-- .../vsphere/vsphere_volume_cluster_ds.go | 20 +- .../vsphere/vsphere_volume_datastore.go | 16 +- .../vsphere/vsphere_volume_diskformat.go | 36 +- .../vsphere/vsphere_volume_disksize.go | 10 +- .../storage/vsphere/vsphere_volume_fstype.go | 55 +- .../vsphere/vsphere_volume_master_restart.go | 36 +- .../vsphere/vsphere_volume_node_delete.go | 22 +- .../vsphere/vsphere_volume_node_poweroff.go | 36 +- .../vsphere/vsphere_volume_ops_storm.go | 24 +- .../storage/vsphere/vsphere_volume_perf.go | 34 +- .../vsphere/vsphere_volume_placement.go | 68 +- .../vsphere/vsphere_volume_vpxd_restart.go | 24 +- .../vsphere/vsphere_volume_vsan_policy.go | 72 +- .../storage/vsphere/vsphere_zone_support.go | 124 +-- test/e2e/suites.go | 15 +- test/e2e/upgrades/apps/cassandra.go | 16 +- test/e2e/upgrades/apps/daemonsets.go | 22 +- test/e2e/upgrades/apps/deployments.go | 24 +- test/e2e/upgrades/apps/etcd.go | 16 +- test/e2e/upgrades/apps/job.go | 16 +- test/e2e/upgrades/apps/mysql.go | 16 +- test/e2e/upgrades/apps/replicasets.go | 16 +- test/e2e/upgrades/apps/statefulset.go | 36 +- ...eaccount_admission_controller_migration.go | 28 +- .../autoscaling/horizontal_pod_autoscalers.go | 29 +- .../upgrades/network/kube_proxy_migration.go | 52 +- test/e2e/upgrades/network/services.go | 35 +- test/e2e/upgrades/node/apparmor.go | 34 +- test/e2e/upgrades/node/configmaps.go | 18 +- test/e2e/upgrades/node/nvidia-gpu.go | 16 +- test/e2e/upgrades/node/secrets.go | 18 +- test/e2e/upgrades/node/sysctl.go | 32 +- .../upgrades/storage/persistent_volumes.go | 20 +- test/e2e/upgrades/storage/volume_mode.go | 22 +- test/e2e/upgrades/upgrade.go | 8 +- test/e2e/upgrades/upgrade_suite.go | 12 +- test/e2e/windows/cpu_limits.go | 10 +- test/e2e/windows/density.go | 28 +- test/e2e/windows/device_plugin.go | 6 +- test/e2e/windows/dns.go | 8 +- test/e2e/windows/gmsa_full.go | 72 +- test/e2e/windows/gmsa_kubelet.go | 4 +- test/e2e/windows/host_process.go | 80 +- test/e2e/windows/hybrid_network.go | 22 +- test/e2e/windows/kubelet_stats.go | 22 +- test/e2e/windows/memory_limits.go | 26 +- test/e2e/windows/reboot_node.go | 30 +- test/e2e/windows/security_context.go | 38 +- test/e2e/windows/service.go | 10 +- test/e2e/windows/volumes.go | 16 +- test/e2e_kubeadm/bootstrap_token_test.go | 2 +- test/e2e_kubeadm/controlplane_nodes_test.go | 8 +- test/e2e_kubeadm/networking_test.go | 6 +- test/e2e_kubeadm/nodes_test.go | 2 +- test/e2e_node/apparmor_test.go | 26 +- test/e2e_node/checkpoint_container.go | 11 +- test/e2e_node/container_log_rotation_test.go | 10 +- test/e2e_node/container_manager_test.go | 18 +- test/e2e_node/cpu_manager_metrics_test.go | 22 +- test/e2e_node/cpu_manager_test.go | 158 ++-- test/e2e_node/critical_pod_test.go | 32 +- test/e2e_node/density_test.go | 88 +- test/e2e_node/device_manager_test.go | 50 +- test/e2e_node/device_plugin_test.go | 94 +- test/e2e_node/e2e_node_suite_test.go | 21 +- test/e2e_node/eviction_test.go | 168 ++-- test/e2e_node/garbage_collector_test.go | 28 +- test/e2e_node/hugepages_test.go | 48 +- test/e2e_node/image_credential_provider.go | 2 +- test/e2e_node/image_id_test.go | 6 +- test/e2e_node/image_list.go | 8 +- 
test/e2e_node/lock_contention_linux_test.go | 2 +- test/e2e_node/log_path_test.go | 18 +- test/e2e_node/memory_manager_test.go | 80 +- test/e2e_node/mirror_pod_grace_period_test.go | 42 +- test/e2e_node/mirror_pod_test.go | 80 +- test/e2e_node/node_container_manager_test.go | 20 +- test/e2e_node/node_perf_test.go | 40 +- test/e2e_node/node_problem_detector_linux.go | 52 +- test/e2e_node/node_shutdown_linux_test.go | 88 +- test/e2e_node/os_label_rename_test.go | 26 +- test/e2e_node/pids_test.go | 8 +- test/e2e_node/pod_conditions_test.go | 22 +- test/e2e_node/pod_hostnamefqdn_test.go | 24 +- test/e2e_node/podresources_test.go | 168 ++-- test/e2e_node/pods_container_manager_test.go | 40 +- test/e2e_node/quota_lsci_test.go | 3 +- test/e2e_node/resource_collector.go | 6 +- test/e2e_node/resource_metrics_test.go | 28 +- test/e2e_node/resource_usage_test.go | 40 +- test/e2e_node/restart_test.go | 38 +- test/e2e_node/runtime_conformance_test.go | 12 +- test/e2e_node/runtimeclass_test.go | 8 +- test/e2e_node/seccompdefault_test.go | 6 +- test/e2e_node/security_context_test.go | 50 +- test/e2e_node/summary_test.go | 22 +- test/e2e_node/system_node_critical_test.go | 36 +- test/e2e_node/topology_manager_test.go | 160 ++-- test/e2e_node/util.go | 64 +- test/e2e_node/volume_manager_test.go | 10 +- test/integration/framework/perf_utils.go | 10 +- .../scheduler_perf/scheduler_perf_test.go | 10 +- test/utils/crd/crd_util.go | 5 +- test/utils/runners.go | 166 ++-- 418 files changed, 11489 insertions(+), 11369 deletions(-) diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index ed13e37bdf6..214ee62b82e 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -85,27 +85,27 @@ var _ = SIGDescribe("Aggregator", func() { */ framework.ConformanceIt("Should be able to support the 1.17 Sample API Server using the current Aggregator", func(ctx context.Context) { // Testing a 1.17 version of the sample-apiserver - TestSampleAPIServer(f, aggrclient, imageutils.GetE2EImage(imageutils.APIServer)) + TestSampleAPIServer(ctx, f, aggrclient, imageutils.GetE2EImage(imageutils.APIServer)) }) }) -func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) { +func cleanTest(ctx context.Context, client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) { // delete the APIService first to avoid causing discovery errors - _ = aggrclient.ApiregistrationV1().APIServices().Delete(context.TODO(), "v1alpha1.wardle.example.com", metav1.DeleteOptions{}) + _ = aggrclient.ApiregistrationV1().APIServices().Delete(ctx, "v1alpha1.wardle.example.com", metav1.DeleteOptions{}) - _ = client.AppsV1().Deployments(namespace).Delete(context.TODO(), "sample-apiserver-deployment", metav1.DeleteOptions{}) - _ = client.CoreV1().Secrets(namespace).Delete(context.TODO(), "sample-apiserver-secret", metav1.DeleteOptions{}) - _ = client.CoreV1().Services(namespace).Delete(context.TODO(), "sample-api", metav1.DeleteOptions{}) - _ = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), "sample-apiserver", metav1.DeleteOptions{}) - _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), "wardler-auth-reader", metav1.DeleteOptions{}) - _ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":auth-delegator", metav1.DeleteOptions{}) - _ = client.RbacV1().ClusterRoles().Delete(context.TODO(), "sample-apiserver-reader", metav1.DeleteOptions{}) - _ = 
client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":sample-apiserver-reader", metav1.DeleteOptions{}) + _ = client.AppsV1().Deployments(namespace).Delete(ctx, "sample-apiserver-deployment", metav1.DeleteOptions{}) + _ = client.CoreV1().Secrets(namespace).Delete(ctx, "sample-apiserver-secret", metav1.DeleteOptions{}) + _ = client.CoreV1().Services(namespace).Delete(ctx, "sample-api", metav1.DeleteOptions{}) + _ = client.CoreV1().ServiceAccounts(namespace).Delete(ctx, "sample-apiserver", metav1.DeleteOptions{}) + _ = client.RbacV1().RoleBindings("kube-system").Delete(ctx, "wardler-auth-reader", metav1.DeleteOptions{}) + _ = client.RbacV1().ClusterRoleBindings().Delete(ctx, "wardler:"+namespace+":auth-delegator", metav1.DeleteOptions{}) + _ = client.RbacV1().ClusterRoles().Delete(ctx, "sample-apiserver-reader", metav1.DeleteOptions{}) + _ = client.RbacV1().ClusterRoleBindings().Delete(ctx, "wardler:"+namespace+":sample-apiserver-reader", metav1.DeleteOptions{}) } // TestSampleAPIServer is a basic test if the sample-apiserver code from 1.10 and compiled against 1.10 // will work on the current Aggregator/API-Server. -func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Clientset, image string) { +func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient *aggregatorclient.Clientset, image string) { ginkgo.By("Registering the sample API server.") client := f.ClientSet restClient := client.Discovery().RESTClient() @@ -128,11 +128,11 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl "tls.key": certCtx.key, }, } - _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating secret %s in namespace %s", secretName, namespace) // kubectl create -f clusterrole.yaml - _, err = client.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{ + _, err = client.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver-reader"}, Rules: []rbacv1.PolicyRule{ @@ -142,7 +142,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating cluster role %s", "sample-apiserver-reader") - _, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ + _, err = client.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "wardler:" + namespace + ":sample-apiserver-reader", }, @@ -163,7 +163,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":sample-apiserver-reader") // kubectl create -f authDelegator.yaml - _, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ + _, err = client.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "wardler:" + namespace + ":auth-delegator", }, @@ -252,7 +252,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl d.Spec.Template.Spec.Containers = containers d.Spec.Template.Spec.Volumes = volumes - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) + deployment, err := 
client.AppsV1().Deployments(namespace).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) @@ -280,16 +280,16 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl }, }, } - _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) + _, err = client.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating service %s in namespace %s", "sample-api", namespace) // kubectl create -f serviceAccount.yaml sa := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver"}} - _, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) + _, err = client.CoreV1().ServiceAccounts(namespace).Create(ctx, sa, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace) // kubectl create -f auth-reader.yaml - _, err = client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{ + _, err = client.RbacV1().RoleBindings("kube-system").Create(ctx, &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "wardler-auth-reader", Annotations: map[string]string{ @@ -319,7 +319,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace) // kubectl create -f apiservice.yaml - _, err = aggrclient.ApiregistrationV1().APIServices().Create(context.TODO(), &apiregistrationv1.APIService{ + _, err = aggrclient.ApiregistrationV1().APIServices().Create(ctx, &apiregistrationv1.APIService{ ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.example.com"}, Spec: apiregistrationv1.APIServiceSpec{ Service: &apiregistrationv1.ServiceReference{ @@ -341,14 +341,14 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl currentPods *v1.PodList ) - err = pollTimed(100*time.Millisecond, 60*time.Second, func() (bool, error) { + err = pollTimed(ctx, 100*time.Millisecond, 60*time.Second, func(ctx context.Context) (bool, error) { - currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get(context.TODO(), "v1alpha1.wardle.example.com", metav1.GetOptions{}) - currentPods, _ = client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) + currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get(ctx, "v1alpha1.wardle.example.com", metav1.GetOptions{}) + currentPods, _ = client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) request := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders") request.SetHeader("Accept", "application/json") - _, err := request.DoRaw(context.TODO()) + _, err := request.DoRaw(ctx) if err != nil { status, ok := err.(*apierrors.StatusError) if !ok { @@ -374,7 +374,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl if currentPods != nil { for _, pod := range currentPods.Items { for _, container := range pod.Spec.Containers { - logs, err := e2epod.GetPodLogs(client, namespace, pod.Name, container.Name) + logs, err := e2epod.GetPodLogs(ctx, client, namespace, pod.Name, container.Name) framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs) } } @@ -388,7 +388,7 @@ func 
TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl // curl -k -v -XPOST https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders // Request Body: {"apiVersion":"wardle.example.com/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}} flunder := `{"apiVersion":"wardle.example.com/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"` + flunderName + `","namespace":"default"}}` - result := restClient.Post().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").Body([]byte(flunder)).SetHeader("Accept", "application/json").Do(context.TODO()) + result := restClient.Post().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").Body([]byte(flunder)).SetHeader("Accept", "application/json").Do(ctx) framework.ExpectNoError(result.Error(), "creating a new flunders resource") var statusCode int result.StatusCode(&statusCode) @@ -403,31 +403,31 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl framework.ExpectEqual(u.GetKind(), "Flunder") framework.ExpectEqual(u.GetName(), flunderName) - pods, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) + pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "getting pods for flunders service") // kubectl get flunders -v 9 // curl -k -v -XGET https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders - contents, err := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(context.TODO()) + contents, err := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(ctx) framework.ExpectNoError(err, "attempting to get a newly created flunders resource") var flundersList samplev1alpha1.FlunderList err = json.Unmarshal(contents, &flundersList) - validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1") + validateErrorWithDebugInfo(ctx, f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1") if len(flundersList.Items) != 1 { framework.Failf("failed to get back the correct flunders list %v", flundersList) } // kubectl delete flunder test-flunder -v 9 // curl -k -v -XDELETE https://35.193.112.40/apis/wardle.example.com/v1alpha1/namespaces/default/flunders/test-flunder - _, err = restClient.Delete().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders/" + flunderName).DoRaw(context.TODO()) - validateErrorWithDebugInfo(f, err, pods, "attempting to delete a newly created flunders(%v) resource", flundersList.Items) + _, err = restClient.Delete().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders/" + flunderName).DoRaw(ctx) + validateErrorWithDebugInfo(ctx, f, err, pods, "attempting to delete a newly created flunders(%v) resource", flundersList.Items) // kubectl get flunders -v 9 // curl -k -v -XGET https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders - contents, err = restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(context.TODO()) + contents, err = 
restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(ctx) framework.ExpectNoError(err, "confirming delete of a newly created flunders resource") err = json.Unmarshal(contents, &flundersList) - validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1") + validateErrorWithDebugInfo(ctx, f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1") if len(flundersList.Items) != 0 { framework.Failf("failed to get back the correct deleted flunders list %v", flundersList) } @@ -460,11 +460,11 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl unstruct := &unstructured.Unstructured{} err = unstruct.UnmarshalJSON(jsonFlunder) framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client") - _, err = dynamicClient.Create(context.TODO(), unstruct, metav1.CreateOptions{}) + _, err = dynamicClient.Create(ctx, unstruct, metav1.CreateOptions{}) framework.ExpectNoError(err, "listing flunders using dynamic client") // kubectl get flunders - unstructuredList, err := dynamicClient.List(context.TODO(), metav1.ListOptions{}) + unstructuredList, err := dynamicClient.List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "listing flunders using dynamic client") if len(unstructuredList.Items) != 1 { framework.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList) @@ -473,7 +473,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl ginkgo.By("Read Status for v1alpha1.wardle.example.com") statusContent, err := restClient.Get(). AbsPath("/apis/apiregistration.k8s.io/v1/apiservices/v1alpha1.wardle.example.com/status"). - SetHeader("Accept", "application/json").DoRaw(context.TODO()) + SetHeader("Accept", "application/json").DoRaw(ctx) framework.ExpectNoError(err, "No response for .../apiservices/v1alpha1.wardle.example.com/status. Error: %v", err) var jr *apiregistrationv1.APIService @@ -485,7 +485,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl patchContent, err := restClient.Patch(types.MergePatchType). AbsPath("/apis/apiregistration.k8s.io/v1/apiservices/v1alpha1.wardle.example.com"). SetHeader("Accept", "application/json"). - Body([]byte(`{"spec":{"versionPriority": 400}}`)).DoRaw(context.TODO()) + Body([]byte(`{"spec":{"versionPriority": 400}}`)).DoRaw(ctx) framework.ExpectNoError(err, "Patch failed for .../apiservices/v1alpha1.wardle.example.com. Error: %v", err) err = json.Unmarshal([]byte(patchContent), &jr) @@ -495,7 +495,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl ginkgo.By("List APIServices") listApiservices, err := restClient.Get(). AbsPath("/apis/apiregistration.k8s.io/v1/apiservices"). 
- SetHeader("Accept", "application/json").DoRaw(context.TODO()) + SetHeader("Accept", "application/json").DoRaw(ctx) framework.ExpectNoError(err, "No response for /apis/apiregistration.k8s.io/v1/apiservices Error: %v", err) @@ -516,41 +516,41 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl } // kubectl delete flunder test-flunder - err = dynamicClient.Delete(context.TODO(), flunderName, metav1.DeleteOptions{}) - validateErrorWithDebugInfo(f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items) + err = dynamicClient.Delete(ctx, flunderName, metav1.DeleteOptions{}) + validateErrorWithDebugInfo(ctx, f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items) // kubectl get flunders - unstructuredList, err = dynamicClient.List(context.TODO(), metav1.ListOptions{}) + unstructuredList, err = dynamicClient.List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "listing flunders using dynamic client") if len(unstructuredList.Items) != 0 { framework.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList) } - cleanTest(client, aggrclient, namespace) + cleanTest(ctx, client, aggrclient, namespace) } // pollTimed will call Poll but time how long Poll actually took. // It will then framework.Logf the msg with the duration of the Poll. // It is assumed that msg will contain one %s for the elapsed time. -func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error { +func pollTimed(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc, msg string) error { defer func(start time.Time, msg string) { elapsed := time.Since(start) framework.Logf(msg, elapsed) }(time.Now(), msg) - return wait.Poll(interval, timeout, condition) + return wait.PollWithContext(ctx, interval, timeout, condition) } -func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) { +func validateErrorWithDebugInfo(ctx context.Context, f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) { if err != nil { namespace := f.Namespace.Name msg := fmt.Sprintf(msg, fields...) 
msg += fmt.Sprintf(" but received unexpected error:\n%v", err) client := f.ClientSet - ep, err := client.CoreV1().Endpoints(namespace).Get(context.TODO(), "sample-api", metav1.GetOptions{}) + ep, err := client.CoreV1().Endpoints(namespace).Get(ctx, "sample-api", metav1.GetOptions{}) if err == nil { msg += fmt.Sprintf("\nFound endpoints for sample-api:\n%v", ep) } - pds, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) + pds, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) if err == nil { msg += fmt.Sprintf("\nFound pods in %s:\n%v", namespace, pds) msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods) diff --git a/test/e2e/apimachinery/apiserver_identity.go b/test/e2e/apimachinery/apiserver_identity.go index 826a287fd93..72db85329d7 100644 --- a/test/e2e/apimachinery/apiserver_identity.go +++ b/test/e2e/apimachinery/apiserver_identity.go @@ -37,7 +37,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -func getControlPlaneHostname(node *v1.Node) (string, error) { +func getControlPlaneHostname(ctx context.Context, node *v1.Node) (string, error) { nodeAddresses := e2enode.GetAddresses(node, v1.NodeExternalIP) if len(nodeAddresses) == 0 { return "", errors.New("no valid addresses to use for SSH") @@ -46,7 +46,7 @@ func getControlPlaneHostname(node *v1.Node) (string, error) { controlPlaneAddress := nodeAddresses[0] host := controlPlaneAddress + ":" + e2essh.SSHPort - result, err := e2essh.SSH("hostname", host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, "hostname", host, framework.TestContext.Provider) if err != nil { return "", err } @@ -59,7 +59,7 @@ func getControlPlaneHostname(node *v1.Node) (string, error) { } // restartAPIServer attempts to restart the kube-apiserver on a node -func restartAPIServer(node *v1.Node) error { +func restartAPIServer(ctx context.Context, node *v1.Node) error { nodeAddresses := e2enode.GetAddresses(node, v1.NodeExternalIP) if len(nodeAddresses) == 0 { return errors.New("no valid addresses to use for SSH") @@ -68,7 +68,7 @@ func restartAPIServer(node *v1.Node) error { controlPlaneAddress := nodeAddresses[0] cmd := "pidof kube-apiserver | xargs sudo kill" framework.Logf("Restarting kube-apiserver via ssh, running: %v", cmd) - result, err := e2essh.SSH(cmd, net.JoinHostPort(controlPlaneAddress, e2essh.SSHPort), framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(controlPlaneAddress, e2essh.SSHPort), framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) return fmt.Errorf("couldn't restart kube-apiserver: %v", err) @@ -121,7 +121,7 @@ var _ = SIGDescribe("kube-apiserver identity [Feature:APIServerIdentity]", func( framework.ExpectEqual(len(leases.Items), len(controlPlaneNodes), "unexpected number of leases") for _, node := range controlPlaneNodes { - hostname, err := getControlPlaneHostname(&node) + hostname, err := getControlPlaneHostname(ctx, &node) framework.ExpectNoError(err) hash := sha256.Sum256([]byte(hostname)) @@ -132,7 +132,7 @@ var _ = SIGDescribe("kube-apiserver identity [Feature:APIServerIdentity]", func( oldHolderIdentity := lease.Spec.HolderIdentity lastRenewedTime := lease.Spec.RenewTime - err = restartAPIServer(&node) + err = restartAPIServer(ctx, &node) framework.ExpectNoError(err) err = wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { diff --git a/test/e2e/apimachinery/apply.go b/test/e2e/apimachinery/apply.go index f7ecc35fe65..86c4d74dd7b 
100644 --- a/test/e2e/apimachinery/apply.go +++ b/test/e2e/apimachinery/apply.go @@ -56,11 +56,11 @@ var _ = SIGDescribe("ServerSideApply", func() { ns = f.Namespace.Name }) - ginkgo.AfterEach(func() { - _ = client.AppsV1().Deployments(ns).Delete(context.TODO(), "deployment", metav1.DeleteOptions{}) - _ = client.AppsV1().Deployments(ns).Delete(context.TODO(), "deployment-shared-unset", metav1.DeleteOptions{}) - _ = client.AppsV1().Deployments(ns).Delete(context.TODO(), "deployment-shared-map-item-removal", metav1.DeleteOptions{}) - _ = client.CoreV1().Pods(ns).Delete(context.TODO(), "test-pod", metav1.DeleteOptions{}) + ginkgo.AfterEach(func(ctx context.Context) { + _ = client.AppsV1().Deployments(ns).Delete(ctx, "deployment", metav1.DeleteOptions{}) + _ = client.AppsV1().Deployments(ns).Delete(ctx, "deployment-shared-unset", metav1.DeleteOptions{}) + _ = client.AppsV1().Deployments(ns).Delete(ctx, "deployment-shared-map-item-removal", metav1.DeleteOptions{}) + _ = client.CoreV1().Pods(ns).Delete(ctx, "test-pod", metav1.DeleteOptions{}) }) /* @@ -119,13 +119,13 @@ var _ = SIGDescribe("ServerSideApply", func() { Name(tc.name). Param("fieldManager", "apply_test"). Body([]byte(tc.body)). - Do(context.TODO()). + Do(ctx). Get() if err != nil { framework.Failf("Failed to create object using Apply patch: %v", err) } - _, err = client.CoreV1().RESTClient().Get().Namespace(ns).Resource(tc.resource).Name(tc.name).Do(context.TODO()).Get() + _, err = client.CoreV1().RESTClient().Get().Namespace(ns).Resource(tc.resource).Name(tc.name).Do(ctx).Get() if err != nil { framework.Failf("Failed to retrieve object: %v", err) } @@ -137,7 +137,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Name(tc.name). Param("fieldManager", "apply_test_2"). Body([]byte(tc.body)). - Do(context.TODO()). + Do(ctx). Get() if err != nil { framework.Failf("Failed to re-apply object using Apply patch: %v", err) @@ -203,13 +203,13 @@ var _ = SIGDescribe("ServerSideApply", func() { Name(tc.name). Param("fieldManager", "apply_test"). Body([]byte(tc.body)). - Do(context.TODO()). + Do(ctx). Get() if err != nil { framework.Failf("Failed to create object using Apply patch: %v", err) } - _, err = client.CoreV1().RESTClient().Get().Namespace(ns).Resource(tc.resource).Name(tc.name).Do(context.TODO()).Get() + _, err = client.CoreV1().RESTClient().Get().Namespace(ns).Resource(tc.resource).Name(tc.name).Do(ctx).Get() if err != nil { framework.Failf("Failed to retrieve object: %v", err) } @@ -221,12 +221,12 @@ var _ = SIGDescribe("ServerSideApply", func() { Name(tc.name). Param("fieldManager", "apply_test2"). Body([]byte(tc.statusPatch)). - Do(context.TODO()). + Do(ctx). Get() if err != nil { framework.Failf("Failed to Apply Status using Apply patch: %v", err) } - pod, err := client.CoreV1().Pods(ns).Get(context.TODO(), "test-pod", metav1.GetOptions{}) + pod, err := client.CoreV1().Pods(ns).Get(ctx, "test-pod", metav1.GetOptions{}) framework.ExpectNoError(err, "retrieving test pod") for _, c := range pod.Status.Conditions { if c.Type == "MyStatus" { @@ -242,13 +242,13 @@ var _ = SIGDescribe("ServerSideApply", func() { Name(tc.name). Param("fieldManager", "apply_test2"). Body([]byte(tc.statusPatch)). - Do(context.TODO()). + Do(ctx). 
Get() if err != nil { framework.Failf("Failed to Apply Status using Apply patch: %v", err) } - pod, err = client.CoreV1().Pods(ns).Get(context.TODO(), "test-pod", metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(ns).Get(ctx, "test-pod", metav1.GetOptions{}) framework.ExpectNoError(err, "retrieving test pod") myStatusFound := false @@ -311,7 +311,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Resource("deployments"). Name("deployment"). Param("fieldManager", "apply_test"). - Body(obj).Do(context.TODO()).Get() + Body(obj).Do(ctx).Get() if err != nil { framework.Failf("Failed to create object using Apply patch: %v", err) } @@ -352,12 +352,12 @@ var _ = SIGDescribe("ServerSideApply", func() { Resource("deployments"). Name("deployment"). Param("fieldManager", "apply_test"). - Body(obj).Do(context.TODO()).Get() + Body(obj).Do(ctx).Get() if err != nil { framework.Failf("Failed to remove container port using Apply patch: %v", err) } - deployment, err := client.AppsV1().Deployments(ns).Get(context.TODO(), "deployment", metav1.GetOptions{}) + deployment, err := client.AppsV1().Deployments(ns).Get(ctx, "deployment", metav1.GetOptions{}) if err != nil { framework.Failf("Failed to retrieve object: %v", err) } @@ -415,7 +415,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Name("deployment-shared-unset"). Param("fieldManager", fieldManager). Body(apply). - Do(context.TODO()). + Do(ctx). Get() if err != nil { framework.Failf("Failed to create object using Apply patch: %v", err) @@ -459,7 +459,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Name("deployment-shared-unset"). Param("fieldManager", "shared_owner_1"). Body(apply). - Do(context.TODO()). + Do(ctx). Get() if err != nil { framework.Failf("Failed to create object using Apply patch: %v", err) @@ -518,7 +518,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Resource("deployments"). Name("deployment"). Param("fieldManager", "apply_test"). - Body(obj).Do(context.TODO()).Get() + Body(obj).Do(ctx).Get() if err != nil { framework.Failf("Failed to create object using Apply patch: %v", err) } @@ -528,7 +528,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Namespace(ns). Resource("deployments"). Name("deployment"). - Body([]byte(`{"spec":{"replicas": 5}}`)).Do(context.TODO()).Get() + Body([]byte(`{"spec":{"replicas": 5}}`)).Do(ctx).Get() if err != nil { framework.Failf("Failed to patch object: %v", err) } @@ -539,7 +539,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Resource("deployments"). Name("deployment"). Param("fieldManager", "apply_test"). - Body(obj).Do(context.TODO()).Get() + Body(obj).Do(ctx).Get() if err == nil { framework.Failf("Expecting to get conflicts when applying object") } @@ -558,7 +558,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Name("deployment"). Param("force", "true"). Param("fieldManager", "apply_test"). - Body(obj).Do(context.TODO()).Get() + Body(obj).Do(ctx).Get() if err != nil { framework.Failf("Failed to apply object with force: %v", err) } @@ -678,7 +678,7 @@ spec: Name(name). Param("fieldManager", "apply_test"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to create custom resource with apply: %v:\n%v", err, string(result)) } @@ -706,7 +706,7 @@ spec: Name(name). Param("fieldManager", "apply_test"). Body(yamlBodyBeta). 
- DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to create custom resource with apply: %v:\n%v", err, string(result)) } @@ -719,7 +719,7 @@ spec: AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural). Name(name). Body([]byte(`{"metadata":{"finalizers":[]}}`)). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to reset finalizers: %v:\n%v", err, string(result)) } @@ -730,7 +730,7 @@ spec: AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural). Name(name). Body([]byte(`{"metadata":{"finalizers":["test-finalizer","another-one"]}}`)). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to add finalizer with merge patch: %v:\n%v", err, string(result)) } @@ -745,7 +745,7 @@ spec: Param("fieldManager", "apply_test"). SetHeader("Accept", "application/json"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to apply same config after adding a finalizer: %v:\n%v", err, string(result)) } @@ -758,7 +758,7 @@ spec: AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural). Name(name). Body([]byte(`{"spec":{"replicas": 5}}`)). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to update number of replicas with merge patch: %v:\n%v", err, string(result)) } @@ -770,7 +770,7 @@ spec: Name(name). Param("fieldManager", "apply_test"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if err == nil { framework.Failf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } @@ -789,7 +789,7 @@ spec: Param("force", "true"). Param("fieldManager", "apply_test"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to apply object with force after updating replicas: %v:\n%v", err, string(result)) } @@ -810,7 +810,7 @@ spec: - name: "y" containerPort: 80 protocol: TCP`, apiVersion, kind, name))). - DoRaw(context.TODO()) + DoRaw(ctx) if err == nil { framework.Failf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result) } @@ -838,7 +838,7 @@ spec: containerPort: 8080 protocol: TCP`, apiVersion, kind, name))). SetHeader("Accept", "application/json"). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to add a new list item to the object as a different applier: %v:\n%v", err, string(result)) } @@ -872,7 +872,7 @@ spec: Name("should-not-exist"). Param("fieldManager", "apply_test"). Body(notExistingYAMLBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsNotFound(err) { framework.Failf("create on update should fail with notFound, got %v", err) } @@ -932,7 +932,7 @@ spec: Name(name). Param("fieldManager", "apply_test"). Body(crdYamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to create custom resource with apply: %v:\n%v", err, string(result)) } @@ -953,7 +953,7 @@ spec: Param("fieldManager", "apply_test_2"). Param("force", "true"). Body(crdYamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("failed to create custom resource with apply: %v:\n%v", err, string(result)) } @@ -1006,7 +1006,7 @@ spec: Name("deployment-shared-map-item-removal"). Param("fieldManager", "test_applier"). Body(apply). - Do(context.TODO()). + Do(ctx). 
Get() if err != nil { framework.Failf("Failed to create object using Apply patch: %v", err) @@ -1055,7 +1055,7 @@ spec: Name("deployment-shared-map-item-removal"). Param("fieldManager", "test_applier"). Body(apply). - Do(context.TODO()). + Do(ctx). Get() if err != nil { framework.Failf("Failed to create object using Apply patch: %v", err) diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index bb0ea369ad5..5630887bbbd 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -48,14 +48,14 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { f := framework.NewDefaultFramework("chunking") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns := f.Namespace.Name c := f.ClientSet client := c.CoreV1().PodTemplates(ns) ginkgo.By("creating a large number of resources") - workqueue.ParallelizeUntil(context.TODO(), 20, numberOfTotalResources, func(i int) { + workqueue.ParallelizeUntil(ctx, 20, numberOfTotalResources, func(i int) { for tries := 3; tries >= 0; tries-- { - _, err := client.Create(context.TODO(), &v1.PodTemplate{ + _, err := client.Create(ctx, &v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("template-%04d", i), }, @@ -87,7 +87,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { var lastRV string for { opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1) - list, err := client.List(context.TODO(), opts) + list, err := client.List(ctx, opts) framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue) gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit)) @@ -118,7 +118,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { ginkgo.By("retrieving those results all at once") opts := metav1.ListOptions{Limit: numberOfTotalResources + 1} - list, err := client.List(context.TODO(), opts) + list, err := client.List(ctx, opts) framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources)) }) @@ -132,7 +132,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { oneTenth := int64(numberOfTotalResources / 10) opts := metav1.ListOptions{} opts.Limit = oneTenth - list, err := client.List(context.TODO(), opts) + list, err := client.List(ctx, opts) framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) firstToken := list.Continue firstRV := list.ResourceVersion @@ -150,7 +150,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { opts.Continue = firstToken var inconsistentToken string wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) { - _, err := client.List(context.TODO(), opts) + _, err := client.List(ctx, opts) if err == nil { framework.Logf("Token %s has not expired yet", firstToken) return false, nil @@ -173,7 +173,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { ginkgo.By("retrieving the second page again with the token received with the error message") opts.Continue = inconsistentToken - list, err = client.List(context.TODO(), opts) + list, err = client.List(ctx, opts) 
framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit) framework.ExpectNotEqual(list.ResourceVersion, firstRV) gomega.Expect(len(list.Items)).To(gomega.BeNumerically("==", opts.Limit)) @@ -196,7 +196,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { opts.Continue = list.Continue lastRV := list.ResourceVersion for { - list, err := client.List(context.TODO(), opts) + list, err := client.List(ctx, opts) framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) if shouldCheckRemainingItem() { if list.GetContinue() == "" { diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index b7cbd7adbe2..431278f5eb3 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -122,14 +122,14 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", servicePort := int32(9443) containerPort := int32(9444) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.DeferCleanup(cleanCRDWebhookTest, f.ClientSet, f.Namespace.Name) ginkgo.By("Setting up server cert") certCtx = setupServerCert(f.Namespace.Name, serviceCRDName) - createAuthReaderRoleBindingForCRDConversion(f, f.Namespace.Name) + createAuthReaderRoleBindingForCRDConversion(ctx, f, f.Namespace.Name) - deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort) + deployCustomResourceWebhookAndService(ctx, f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort) }) /* @@ -162,8 +162,8 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", return } ginkgo.DeferCleanup(testcrd.CleanUp) - waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2") - testCustomResourceConversionWebhook(f, testcrd.Crd, testcrd.DynamicClients) + waitWebhookConversionReady(ctx, f, testcrd.Crd, testcrd.DynamicClients, "v2") + testCustomResourceConversionWebhook(ctx, f, testcrd.Crd, testcrd.DynamicClients) }) /* @@ -197,23 +197,23 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", return } ginkgo.DeferCleanup(testcrd.CleanUp) - waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2") - testCRListConversion(f, testcrd) + waitWebhookConversionReady(ctx, f, testcrd.Crd, testcrd.DynamicClients, "v2") + testCRListConversion(ctx, f, testcrd) }) }) -func cleanCRDWebhookTest(client clientset.Interface, namespaceName string) { - _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceCRDName, metav1.DeleteOptions{}) - _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentCRDName, metav1.DeleteOptions{}) - _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretCRDName, metav1.DeleteOptions{}) - _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingCRDName, metav1.DeleteOptions{}) +func cleanCRDWebhookTest(ctx context.Context, client clientset.Interface, namespaceName string) { + _ = client.CoreV1().Services(namespaceName).Delete(ctx, serviceCRDName, metav1.DeleteOptions{}) + _ = client.AppsV1().Deployments(namespaceName).Delete(ctx, deploymentCRDName, metav1.DeleteOptions{}) + _ = client.CoreV1().Secrets(namespaceName).Delete(ctx, secretCRDName, metav1.DeleteOptions{}) + _ = 
client.RbacV1().RoleBindings("kube-system").Delete(ctx, roleBindingCRDName, metav1.DeleteOptions{}) } -func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespace string) { +func createAuthReaderRoleBindingForCRDConversion(ctx context.Context, f *framework.Framework, namespace string) { ginkgo.By("Create role binding to let cr conversion webhook read extension-apiserver-authentication") client := f.ClientSet // Create the role binding to allow the webhook read the extension-apiserver-authentication configmap - _, err := client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{ + _, err := client.RbacV1().RoleBindings("kube-system").Create(ctx, &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleBindingCRDName, }, @@ -238,7 +238,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa } } -func deployCustomResourceWebhookAndService(f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32) { +func deployCustomResourceWebhookAndService(ctx context.Context, f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32) { ginkgo.By("Deploying the custom resource conversion webhook pod") client := f.ClientSet @@ -254,7 +254,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, }, } namespace := f.Namespace.Name - _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // Create the deployment of the webhook @@ -307,7 +307,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, d.Spec.Template.Spec.Containers = containers d.Spec.Template.Spec.Volumes = volumes - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) + deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace) ginkgo.By("Wait for the deployment to be ready") @@ -338,11 +338,11 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, }, }, } - _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) + _, err = client.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace) ginkgo.By("Verifying the service has paired with the endpoint") - err = framework.WaitForServiceEndpointsNum(client, namespace, serviceCRDName, 1, 1*time.Second, 30*time.Second) + err = framework.WaitForServiceEndpointsNum(ctx, client, namespace, serviceCRDName, 1, 1*time.Second, 30*time.Second) framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceCRDName, 1) } @@ -371,7 +371,7 @@ func verifyV2Object(crd *apiextensionsv1.CustomResourceDefinition, obj *unstruct gomega.Expect(port).To(gomega.BeEquivalentTo("8080")) } -func testCustomResourceConversionWebhook(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface) { +func testCustomResourceConversionWebhook(ctx context.Context, f *framework.Framework, crd 
*apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface) { name := "cr-instance-1" ginkgo.By("Creating a v1 custom resource") crInstance := &unstructured.Unstructured{ @@ -385,15 +385,15 @@ func testCustomResourceConversionWebhook(f *framework.Framework, crd *apiextensi "hostPort": "localhost:8080", }, } - _, err := customResourceClients["v1"].Create(context.TODO(), crInstance, metav1.CreateOptions{}) + _, err := customResourceClients["v1"].Create(ctx, crInstance, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("v2 custom resource should be converted") - v2crd, err := customResourceClients["v2"].Get(context.TODO(), name, metav1.GetOptions{}) + v2crd, err := customResourceClients["v2"].Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "Getting v2 of custom resource %s", name) verifyV2Object(crd, v2crd) } -func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { +func testCRListConversion(ctx context.Context, f *framework.Framework, testCrd *crd.TestCrd) { crd := testCrd.Crd customResourceClients := testCrd.DynamicClients name1 := "cr-instance-1" @@ -410,7 +410,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { "hostPort": "localhost:8080", }, } - _, err := customResourceClients["v1"].Create(context.TODO(), crInstance, metav1.CreateOptions{}) + _, err := customResourceClients["v1"].Create(ctx, crInstance, metav1.CreateOptions{}) framework.ExpectNoError(err) // Now cr-instance-1 is stored as v1. lets change storage version @@ -437,7 +437,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { // // TODO: we have to wait for the storage version to become effective. Storage version changes are not instant. for i := 0; i < 5; i++ { - _, err = customResourceClients["v1"].Create(context.TODO(), crInstance, metav1.CreateOptions{}) + _, err = customResourceClients["v1"].Create(ctx, crInstance, metav1.CreateOptions{}) if err == nil { break } @@ -447,7 +447,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { // Now that we have a v1 and v2 object, both list operation in v1 and v2 should work as expected. ginkgo.By("List CRs in v1") - list, err := customResourceClients["v1"].List(context.TODO(), metav1.ListOptions{}) + list, err := customResourceClients["v1"].List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2)) framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) || @@ -456,7 +456,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { verifyV1Object(crd, &list.Items[1]) ginkgo.By("List CRs in v2") - list, err = customResourceClients["v2"].List(context.TODO(), metav1.ListOptions{}) + list, err = customResourceClients["v2"].List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2)) framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) || @@ -466,8 +466,8 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { } // waitWebhookConversionReady sends stub custom resource creation requests requiring conversion until one succeeds. 
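The readiness helper that follows is rebuilt around wait.PollImmediateWithContext, whose condition function receives the same ctx. A minimal sketch of that create-until-accepted polling with a dynamic client (the helper name and stub object are assumptions, not the patch's code):

package ctxexamples

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
)

// waitUntilCreateSucceeds keeps creating a stub object until the API server
// accepts it, then deletes it again; ctx cancellation stops the polling.
func waitUntilCreateSucceeds(ctx context.Context, client dynamic.ResourceInterface, stub *unstructured.Unstructured) error {
	return wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) {
		if _, err := client.Create(ctx, stub, metav1.CreateOptions{}); err != nil {
			return false, nil // not ready yet, retry
		}
		return true, client.Delete(ctx, stub.GetName(), metav1.DeleteOptions{})
	})
}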
-func waitWebhookConversionReady(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface, version string) { - framework.ExpectNoError(wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { +func waitWebhookConversionReady(ctx context.Context, f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface, version string) { + framework.ExpectNoError(wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) { crInstance := &unstructured.Unstructured{ Object: map[string]interface{}{ "kind": crd.Spec.Names.Kind, @@ -478,7 +478,7 @@ func waitWebhookConversionReady(f *framework.Framework, crd *apiextensionsv1.Cus }, }, } - _, err := customResourceClients[version].Create(context.TODO(), crInstance, metav1.CreateOptions{}) + _, err := customResourceClients[version].Create(ctx, crInstance, metav1.CreateOptions{}) if err != nil { // tolerate clusters that do not set --enable-aggregator-routing and have to wait for kube-proxy // to program the service network, during which conversion requests return errors @@ -486,7 +486,7 @@ func waitWebhookConversionReady(f *framework.Framework, crd *apiextensionsv1.Cus return false, nil } - framework.ExpectNoError(customResourceClients[version].Delete(context.TODO(), crInstance.GetName(), metav1.DeleteOptions{}), "cleaning up stub object") + framework.ExpectNoError(customResourceClients[version].Delete(ctx, crInstance.GetName(), metav1.DeleteOptions{}), "cleaning up stub object") return true, nil })) } diff --git a/test/e2e/apimachinery/crd_publish_openapi.go b/test/e2e/apimachinery/crd_publish_openapi.go index db4230081f3..29998b33fcf 100644 --- a/test/e2e/apimachinery/crd_publish_openapi.go +++ b/test/e2e/apimachinery/crd_publish_openapi.go @@ -138,7 +138,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err) } - if err := cleanupCRD(f, crd); err != nil { + if err := cleanupCRD(ctx, f, crd); err != nil { framework.Failf("%v", err) } }) @@ -179,7 +179,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu framework.Failf("%v", err) } - if err := cleanupCRD(f, crd); err != nil { + if err := cleanupCRD(ctx, f, crd); err != nil { framework.Failf("%v", err) } }) @@ -220,7 +220,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu framework.Failf("%v", err) } - if err := cleanupCRD(f, crd); err != nil { + if err := cleanupCRD(ctx, f, crd); err != nil { framework.Failf("%v", err) } }) @@ -262,7 +262,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu framework.Failf("%v", err) } - if err := cleanupCRD(f, crd); err != nil { + if err := cleanupCRD(ctx, f, crd); err != nil { framework.Failf("%v", err) } }) @@ -292,10 +292,10 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v1"), schemaFoo); err != nil { framework.Failf("%v", err) } - if err := cleanupCRD(f, crdFoo); err != nil { + if err := cleanupCRD(ctx, f, crdFoo); err != nil { framework.Failf("%v", err) } - if err := cleanupCRD(f, crdWaldo); err != nil { + if err := cleanupCRD(ctx, f, crdWaldo); err != nil { framework.Failf("%v", err) } }) @@ -318,7 +318,7 @@ var _ = 
SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil { framework.Failf("%v", err) } - if err := cleanupCRD(f, crdMultiVer); err != nil { + if err := cleanupCRD(ctx, f, crdMultiVer); err != nil { framework.Failf("%v", err) } @@ -340,10 +340,10 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v4"), schemaFoo); err != nil { framework.Failf("%v", err) } - if err := cleanupCRD(f, crdFoo); err != nil { + if err := cleanupCRD(ctx, f, crdFoo); err != nil { framework.Failf("%v", err) } - if err := cleanupCRD(f, crdWaldo); err != nil { + if err := cleanupCRD(ctx, f, crdWaldo); err != nil { framework.Failf("%v", err) } }) @@ -373,10 +373,10 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v6"), schemaFoo); err != nil { framework.Failf("%v", err) } - if err := cleanupCRD(f, crdFoo); err != nil { + if err := cleanupCRD(ctx, f, crdFoo); err != nil { framework.Failf("%v", err) } - if err := cleanupCRD(f, crdWaldo); err != nil { + if err := cleanupCRD(ctx, f, crdWaldo); err != nil { framework.Failf("%v", err) } }) @@ -406,7 +406,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu {"op":"test","path":"/spec/versions/1/name","value":"v3"}, {"op": "replace", "path": "/spec/versions/1/name", "value": "v4"} ]`) - crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crdMultiVer.Crd.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) + crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crdMultiVer.Crd.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) if err != nil { framework.Failf("%v", err) } @@ -427,7 +427,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu // TestCrd.Versions is different from TestCrd.Crd.Versions, we have to manually // update the name there. 
Used by cleanupCRD crdMultiVer.Crd.Spec.Versions[1].Name = "v4" - if err := cleanupCRD(f, crdMultiVer); err != nil { + if err := cleanupCRD(ctx, f, crdMultiVer); err != nil { framework.Failf("%v", err) } }) @@ -454,12 +454,12 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu } ginkgo.By("mark a version not serverd") - crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Crd.Name, metav1.GetOptions{}) + crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Crd.Name, metav1.GetOptions{}) if err != nil { framework.Failf("%v", err) } crd.Crd.Spec.Versions[1].Served = false - crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd.Crd, metav1.UpdateOptions{}) + crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(ctx, crd.Crd, metav1.UpdateOptions{}) if err != nil { framework.Failf("%v", err) } @@ -473,7 +473,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu framework.Failf("%v", err) } - if err := cleanupCRD(f, crd); err != nil { + if err := cleanupCRD(ctx, f, crd); err != nil { framework.Failf("%v", err) } }) @@ -497,11 +497,11 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu } if err := verifyKubectlExplain(f.Namespace.Name, customServiceShortName+".spec", `(?s)DESCRIPTION:.*Specification of CustomService.*FIELDS:.*dummy.*.*Dummy property`); err != nil { - _ = cleanupCRD(f, crdSvc) // need to remove the crd since its name is unchanged + _ = cleanupCRD(ctx, f, crdSvc) // need to remove the crd since its name is unchanged framework.Failf("%v", err) } - if err := cleanupCRD(f, crdSvc); err != nil { + if err := cleanupCRD(ctx, f, crdSvc); err != nil { framework.Failf("%v", err) } }) @@ -572,8 +572,8 @@ func setupCRDAndVerifySchemaWithOptions(f *framework.Framework, schema, expect [ return crd, nil } -func cleanupCRD(f *framework.Framework, crd *crd.TestCrd) error { - crd.CleanUp() +func cleanupCRD(ctx context.Context, f *framework.Framework, crd *crd.TestCrd) error { + _ = crd.CleanUp(ctx) for _, v := range crd.Crd.Spec.Versions { name := definitionName(crd, v.Name) if err := waitForDefinitionCleanup(f.ClientSet, name); err != nil { diff --git a/test/e2e/apimachinery/crd_validation_rules.go b/test/e2e/apimachinery/crd_validation_rules.go index dbd8278449d..aa83872b593 100644 --- a/test/e2e/apimachinery/crd_validation_rules.go +++ b/test/e2e/apimachinery/crd_validation_rules.go @@ -107,7 +107,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f ginkgo.By("Creating a custom resource with values that are allowed by the validation rules set on the custom resource definition") crClient, gvr := customResourceClient(crd) name1 := names.SimpleNameGenerator.GenerateName("cr-1") - _, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ + _, err = crClient.Namespace(f.Namespace.Name).Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": gvr.Group + "/" + gvr.Version, "kind": crd.Spec.Names.Kind, "metadata": map[string]interface{}{ @@ -137,7 +137,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f ginkgo.By("Creating a custom resource with values that fail the validation rules set on the custom resource definition") crClient, gvr := 
customResourceClient(crd) name1 := names.SimpleNameGenerator.GenerateName("cr-1") - _, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ + _, err = crClient.Namespace(f.Namespace.Name).Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": gvr.Group + "/" + gvr.Version, "kind": crd.Spec.Names.Kind, "metadata": map[string]interface{}{ @@ -248,7 +248,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f ginkgo.By("Attempting to create a custom resource that will exceed the runtime cost limit") crClient, gvr := customResourceClient(crd) name1 := names.SimpleNameGenerator.GenerateName("cr-1") - _, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ + _, err = crClient.Namespace(f.Namespace.Name).Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": gvr.Group + "/" + gvr.Version, "kind": crd.Spec.Names.Kind, "metadata": map[string]interface{}{ @@ -294,7 +294,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f ginkgo.By("Attempting to create a custom resource") crClient, gvr := customResourceClient(crd) name1 := names.SimpleNameGenerator.GenerateName("cr-1") - unstruct, err := crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ + unstruct, err := crClient.Namespace(f.Namespace.Name).Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": gvr.Group + "/" + gvr.Version, "kind": crd.Spec.Names.Kind, "metadata": map[string]interface{}{ @@ -307,7 +307,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f }}, metav1.CreateOptions{}) framework.ExpectNoError(err, "transition rules do not apply to create operations") ginkgo.By("Updating a custom resource with a value that does not satisfy an x-kubernetes-validations transition rule") - _, err = crClient.Namespace(f.Namespace.Name).Update(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ + _, err = crClient.Namespace(f.Namespace.Name).Update(ctx, &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": gvr.Group + "/" + gvr.Version, "kind": crd.Spec.Names.Kind, "metadata": map[string]interface{}{ diff --git a/test/e2e/apimachinery/crd_watch.go b/test/e2e/apimachinery/crd_watch.go index 4a9117b1c59..9740a1d7046 100644 --- a/test/e2e/apimachinery/crd_watch.go +++ b/test/e2e/apimachinery/crd_watch.go @@ -82,47 +82,47 @@ var _ = SIGDescribe("CustomResourceDefinition Watch [Privileged:ClusterAdmin]", noxuResourceClient, err := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition) framework.ExpectNoError(err, "creating custom resource client") - watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA) + watchA, err := watchCRWithName(ctx, noxuResourceClient, watchCRNameA) framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameA) - watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB) + watchB, err := watchCRWithName(ctx, noxuResourceClient, watchCRNameB) framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameB) testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA) testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB) ginkgo.By("Creating first CR ") - testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition) + 
testCrA, err = instantiateCustomResource(ctx, testCrA, noxuResourceClient, noxuDefinition) framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrA) expectEvent(watchA, watch.Added, testCrA) expectNoEvent(watchB, watch.Added, testCrA) ginkgo.By("Creating second CR") - testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition) + testCrB, err = instantiateCustomResource(ctx, testCrB, noxuResourceClient, noxuDefinition) framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrB) expectEvent(watchB, watch.Added, testCrB) expectNoEvent(watchA, watch.Added, testCrB) ginkgo.By("Modifying first CR") - err = patchCustomResource(noxuResourceClient, watchCRNameA) + err = patchCustomResource(ctx, noxuResourceClient, watchCRNameA) framework.ExpectNoError(err, "failed to patch custom resource: %s", watchCRNameA) expectEvent(watchA, watch.Modified, nil) expectNoEvent(watchB, watch.Modified, nil) ginkgo.By("Modifying second CR") - err = patchCustomResource(noxuResourceClient, watchCRNameB) + err = patchCustomResource(ctx, noxuResourceClient, watchCRNameB) framework.ExpectNoError(err, "failed to patch custom resource: %s", watchCRNameB) expectEvent(watchB, watch.Modified, nil) expectNoEvent(watchA, watch.Modified, nil) ginkgo.By("Deleting first CR") - err = deleteCustomResource(noxuResourceClient, watchCRNameA) + err = deleteCustomResource(ctx, noxuResourceClient, watchCRNameA) framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameA) expectEvent(watchA, watch.Deleted, nil) expectNoEvent(watchB, watch.Deleted, nil) ginkgo.By("Deleting second CR") - err = deleteCustomResource(noxuResourceClient, watchCRNameB) + err = deleteCustomResource(ctx, noxuResourceClient, watchCRNameB) framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameB) expectEvent(watchB, watch.Deleted, nil) expectNoEvent(watchA, watch.Deleted, nil) @@ -130,9 +130,9 @@ var _ = SIGDescribe("CustomResourceDefinition Watch [Privileged:ClusterAdmin]", }) }) -func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (watch.Interface, error) { +func watchCRWithName(ctx context.Context, crdResourceClient dynamic.ResourceInterface, name string) (watch.Interface, error) { return crdResourceClient.Watch( - context.TODO(), + ctx, metav1.ListOptions{ FieldSelector: "metadata.name=" + name, TimeoutSeconds: int64ptr(600), @@ -140,8 +140,8 @@ func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) ( ) } -func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1.CustomResourceDefinition) (*unstructured.Unstructured, error) { - createdInstance, err := client.Create(context.TODO(), instanceToCreate, metav1.CreateOptions{}) +func instantiateCustomResource(ctx context.Context, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1.CustomResourceDefinition) (*unstructured.Unstructured, error) { + createdInstance, err := client.Create(ctx, instanceToCreate, metav1.CreateOptions{}) if err != nil { return nil, err } @@ -169,9 +169,9 @@ func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, clie return createdInstance, nil } -func patchCustomResource(client dynamic.ResourceInterface, name string) error { +func patchCustomResource(ctx context.Context, client dynamic.ResourceInterface, name string) error { _, err := client.Patch( - context.TODO(), + 
ctx, name, types.JSONPatchType, []byte(`[{ "op": "add", "path": "/dummy", "value": "test" }]`), @@ -179,8 +179,8 @@ func patchCustomResource(client dynamic.ResourceInterface, name string) error { return err } -func deleteCustomResource(client dynamic.ResourceInterface, name string) error { - return client.Delete(context.TODO(), name, metav1.DeleteOptions{}) +func deleteCustomResource(ctx context.Context, client dynamic.ResourceInterface, name string) error { + return client.Delete(ctx, name, metav1.DeleteOptions{}) } func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1.CustomResourceDefinition) (dynamic.ResourceInterface, error) { diff --git a/test/e2e/apimachinery/custom_resource_definition.go b/test/e2e/apimachinery/custom_resource_definition.go index dcc5934c5f5..f4454a05bb9 100644 --- a/test/e2e/apimachinery/custom_resource_definition.go +++ b/test/e2e/apimachinery/custom_resource_definition.go @@ -112,7 +112,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin }() selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID} - list, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), selectorListOpts) + list, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().List(ctx, selectorListOpts) framework.ExpectNoError(err, "listing CustomResourceDefinitions") framework.ExpectEqual(len(list.Items), testListSize) for _, actual := range list.Items { @@ -132,7 +132,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin // Use delete collection to remove the CRDs err = fixtures.DeleteV1CustomResourceDefinitions(selectorListOpts, apiExtensionClient) framework.ExpectNoError(err, "deleting CustomResourceDefinitions") - _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) + _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "getting remaining CustomResourceDefinition") }) @@ -165,21 +165,21 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin updateCondition := v1.CustomResourceDefinitionCondition{Message: "updated"} err = retry.RetryOnConflict(retry.DefaultRetry, func() error { // Use dynamic client to read the status sub-resource since typed client does not expose it. 
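The status handling just below reads the CRD's /status sub-resource through the dynamic client with the test's ctx. Sketched in isolation (the function name is illustrative; the GVR is the standard one for CustomResourceDefinitions):

package ctxexamples

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// getCRDStatus fetches the /status sub-resource of a CustomResourceDefinition
// via the dynamic client; the trailing "status" argument selects the sub-resource.
func getCRDStatus(ctx context.Context, dc dynamic.Interface, name string) (*unstructured.Unstructured, error) {
	gvr := schema.GroupVersionResource{
		Group:    "apiextensions.k8s.io",
		Version:  "v1",
		Resource: "customresourcedefinitions",
	}
	return dc.Resource(gvr).Get(ctx, name, metav1.GetOptions{}, "status")
}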
- u, err := resourceClient.Get(context.TODO(), crd.GetName(), metav1.GetOptions{}, "status") + u, err := resourceClient.Get(ctx, crd.GetName(), metav1.GetOptions{}, "status") framework.ExpectNoError(err, "getting CustomResourceDefinition status") status := unstructuredToCRD(u) if !equality.Semantic.DeepEqual(status.Spec, crd.Spec) { framework.Failf("Expected CustomResourceDefinition Spec to match status sub-resource Spec, but got:\n%s", diff.ObjectReflectDiff(status.Spec, crd.Spec)) } status.Status.Conditions = append(status.Status.Conditions, updateCondition) - updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), status, metav1.UpdateOptions{}) + updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(ctx, status, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "updating CustomResourceDefinition status") expectCondition(updated.Status.Conditions, updateCondition) patchCondition := v1.CustomResourceDefinitionCondition{Message: "patched"} - patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.GetName(), + patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crd.GetName(), types.JSONPatchType, []byte(`[{"op": "add", "path": "/status/conditions", "value": [{"message": "patched"}]}]`), metav1.PatchOptions{}, "status") @@ -199,7 +199,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin { ginkgo.By("fetching the /apis discovery document") apiGroupList := &metav1.APIGroupList{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis").Do(context.TODO()).Into(apiGroupList) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis").Do(ctx).Into(apiGroupList) framework.ExpectNoError(err, "fetching /apis") ginkgo.By("finding the apiextensions.k8s.io API group in the /apis discovery document") @@ -226,7 +226,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin { ginkgo.By("fetching the /apis/apiextensions.k8s.io discovery document") group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err, "fetching /apis/apiextensions.k8s.io") framework.ExpectEqual(group.Name, v1.GroupName, "verifying API group name in /apis/apiextensions.k8s.io discovery document") @@ -244,7 +244,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin { ginkgo.By("fetching the /apis/apiextensions.k8s.io/v1 discovery document") apiResourceList := &metav1.APIResourceList{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io/v1").Do(context.TODO()).Into(apiResourceList) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io/v1").Do(ctx).Into(apiResourceList) framework.ExpectNoError(err, "fetching /apis/apiextensions.k8s.io/v1") framework.ExpectEqual(apiResourceList.GroupVersion, v1.SchemeGroupVersion.String(), "verifying API group/version in /apis/apiextensions.k8s.io/v1 discovery document") @@ -296,7 +296,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin Resource: crd.Spec.Names.Plural, } crClient := dynamicClient.Resource(gvr) - _, err = crClient.Create(context.TODO(), 
&unstructured.Unstructured{Object: map[string]interface{}{ + _, err = crClient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": gvr.Group + "/" + gvr.Version, "kind": crd.Spec.Names.Kind, "metadata": map[string]interface{}{ @@ -306,13 +306,13 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin framework.ExpectNoError(err, "creating CR") // Setting default for a to "A" and waiting for the CR to get defaulted on read - crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[ + crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crd.Name, types.JSONPatchType, []byte(`[ {"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default", "value": "A"} ]`), metav1.PatchOptions{}) framework.ExpectNoError(err, "setting default for a to \"A\" in schema") err = wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { - u1, err := crClient.Get(context.TODO(), name1, metav1.GetOptions{}) + u1, err := crClient.Get(ctx, name1, metav1.GetOptions{}) if err != nil { return false, err } @@ -332,7 +332,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin // create CR with default in storage name2 := names.SimpleNameGenerator.GenerateName("cr-2") - u2, err := crClient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ + u2, err := crClient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": gvr.Group + "/" + gvr.Version, "kind": crd.Spec.Names.Kind, "metadata": map[string]interface{}{ @@ -347,14 +347,14 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin framework.ExpectEqual(v, "A", "\"a\" is defaulted to \"A\"") // Deleting default for a, adding default "B" for b and waiting for the CR to get defaulted on read for b - crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[ + crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crd.Name, types.JSONPatchType, []byte(`[ {"op":"remove","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default"}, {"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/b/default", "value": "B"} ]`), metav1.PatchOptions{}) framework.ExpectNoError(err, "setting default for b to \"B\" and remove default for a") err = wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { - u2, err := crClient.Get(context.TODO(), name2, metav1.GetOptions{}) + u2, err := crClient.Get(ctx, name2, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apimachinery/discovery.go b/test/e2e/apimachinery/discovery.go index 34742bea1d4..36c32271c6d 100644 --- a/test/e2e/apimachinery/discovery.go +++ b/test/e2e/apimachinery/discovery.go @@ -123,7 +123,7 @@ var _ = SIGDescribe("Discovery", func() { // get list of APIGroup endpoints list := &metav1.APIGroupList{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/").Do(context.TODO()).Into(list) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/").Do(ctx).Into(list) framework.ExpectNoError(err, "Failed to find /apis/") framework.ExpectNotEqual(len(list.Groups), 0, "Missing APIGroups") @@ -137,7 +137,7 @@ var _ = SIGDescribe("Discovery", func() { // locate APIGroup endpoint 
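Both discovery passes above funnel raw REST requests through Do(ctx) instead of Do(context.TODO()). The call shape, reduced to a sketch (the wrapper function is an assumption):

package ctxexamples

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAPIGroups fetches the /apis discovery document with the caller's ctx so
// the HTTP request is abandoned as soon as the surrounding test is aborted.
func listAPIGroups(ctx context.Context, c kubernetes.Interface) (*metav1.APIGroupList, error) {
	groups := &metav1.APIGroupList{}
	err := c.Discovery().RESTClient().Get().AbsPath("/apis").Do(ctx).Into(groups)
	return groups, err
}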
checkGroup := &metav1.APIGroup{} apiPath := "/apis/" + group.Name + "/" - err = f.ClientSet.Discovery().RESTClient().Get().AbsPath(apiPath).Do(context.TODO()).Into(checkGroup) + err = f.ClientSet.Discovery().RESTClient().Get().AbsPath(apiPath).Do(ctx).Into(checkGroup) framework.ExpectNoError(err, "Fail to access: %s", apiPath) framework.ExpectNotEqual(len(checkGroup.Versions), 0, "No version found for %v", group.Name) framework.Logf("PreferredVersion.GroupVersion: %s", checkGroup.PreferredVersion.GroupVersion) diff --git a/test/e2e/apimachinery/etcd_failure.go b/test/e2e/apimachinery/etcd_failure.go index f587b1fe01c..34b961f34a7 100644 --- a/test/e2e/apimachinery/etcd_failure.go +++ b/test/e2e/apimachinery/etcd_failure.go @@ -41,7 +41,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() { f := framework.NewDefaultFramework("etcd-failure") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // This test requires: // - SSH // - master access @@ -50,7 +50,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() { e2eskipper.SkipUnlessProviderIs("gce") e2eskipper.SkipUnlessSSHKeyPresent() - err := e2erc.RunRC(testutils.RCConfig{ + err := e2erc.RunRC(ctx, testutils.RCConfig{ Client: f.ClientSet, Name: "baz", Namespace: f.Namespace.Name, @@ -62,6 +62,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() { ginkgo.It("should recover from network partition with master", func(ctx context.Context) { etcdFailTest( + ctx, f, "sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP", "sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP", @@ -70,6 +71,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() { ginkgo.It("should recover from SIGKILL", func(ctx context.Context) { etcdFailTest( + ctx, f, "pgrep etcd | xargs -I {} sudo kill -9 {}", "echo 'do nothing. monit should restart etcd.'", @@ -77,12 +79,12 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() { }) }) -func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) { - doEtcdFailure(failCommand, fixCommand) +func etcdFailTest(ctx context.Context, f *framework.Framework, failCommand, fixCommand string) { + doEtcdFailure(ctx, failCommand, fixCommand) - checkExistingRCRecovers(f) + checkExistingRCRecovers(ctx, f) - apps.TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage) + apps.TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage) } // For this duration, etcd will be failed by executing a failCommand on the master. @@ -92,17 +94,17 @@ func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) { // master and go on to assert that etcd and kubernetes components recover. 
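The etcd-failure rewrite shows the mechanical core of the whole patch: any helper that eventually talks to the API server (or SSHes to a node) now takes ctx as its first parameter and forwards it. A generic sketch of that threading (both functions are illustrative, not the patch's helpers):

package ctxexamples

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteMatchingPods is the leaf helper: it accepts ctx and hands it to client-go.
func deleteMatchingPods(ctx context.Context, c kubernetes.Interface, ns, selector string) error {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	for i := range pods.Items {
		if err := c.CoreV1().Pods(ns).Delete(ctx, pods.Items[i].Name, metav1.DeleteOptions{}); err != nil {
			return err
		}
	}
	return nil
}

// recoverTest is the outer helper: it takes ctx solely to forward it, mirroring
// the helper rewrites in etcd_failure.go.
func recoverTest(ctx context.Context, c kubernetes.Interface, ns string) error {
	return deleteMatchingPods(ctx, c, ns, "name=baz")
}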
const etcdFailureDuration = 20 * time.Second -func doEtcdFailure(failCommand, fixCommand string) { +func doEtcdFailure(ctx context.Context, failCommand, fixCommand string) { ginkgo.By("failing etcd") - masterExec(failCommand) + masterExec(ctx, failCommand) time.Sleep(etcdFailureDuration) - masterExec(fixCommand) + masterExec(ctx, fixCommand) } -func masterExec(cmd string) { +func masterExec(ctx context.Context, cmd string) { host := framework.APIAddress() + ":22" - result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider) framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd) if result.Code != 0 { e2essh.LogResult(result) @@ -110,15 +112,15 @@ func masterExec(cmd string) { } } -func checkExistingRCRecovers(f *framework.Framework) { +func checkExistingRCRecovers(ctx context.Context, f *framework.Framework) { ginkgo.By("assert that the pre-existing replication controller recovers") podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) rcSelector := labels.Set{"name": "baz"}.AsSelector() ginkgo.By("deleting pods from existing replication controller") - framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { + framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*60, func(ctx context.Context) (bool, error) { options := metav1.ListOptions{LabelSelector: rcSelector.String()} - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) if err != nil { framework.Logf("apiserver returned error, as expected before recovery: %v", err) return false, nil @@ -127,7 +129,7 @@ func checkExistingRCRecovers(f *framework.Framework) { return false, nil } for _, pod := range pods.Items { - err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name) } framework.Logf("apiserver has recovered") @@ -135,9 +137,9 @@ func checkExistingRCRecovers(f *framework.Framework) { })) ginkgo.By("waiting for replication controller to recover") - framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { + framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*60, func(ctx context.Context) (bool, error) { options := metav1.ListOptions{LabelSelector: rcSelector.String()} - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String()) for _, pod := range pods.Items { if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) { diff --git a/test/e2e/apimachinery/flowcontrol.go b/test/e2e/apimachinery/flowcontrol.go index 96165c72637..6c5510dcde1 100644 --- a/test/e2e/apimachinery/flowcontrol.go +++ b/test/e2e/apimachinery/flowcontrol.go @@ -62,13 +62,13 @@ var _ = SIGDescribe("API priority and fairness", func() { nonMatchingUsername := "foo" ginkgo.By("creating a testing PriorityLevelConfiguration object") - createdPriorityLevel := createPriorityLevel(f, testingPriorityLevelName, 1) + createdPriorityLevel := createPriorityLevel(ctx, f, testingPriorityLevelName, 1) ginkgo.By("creating a testing FlowSchema object") - createdFlowSchema := 
createFlowSchema(f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername}) + createdFlowSchema := createFlowSchema(ctx, f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername}) ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state") - waitForSteadyState(f, testingFlowSchemaName, testingPriorityLevelName) + waitForSteadyState(ctx, f, testingFlowSchemaName, testingPriorityLevelName) var response *http.Response ginkgo.By("response headers should contain the UID of the appropriate FlowSchema and PriorityLevelConfiguration for a matching user") @@ -130,19 +130,19 @@ var _ = SIGDescribe("API priority and fairness", func() { for i := range clients { clients[i].priorityLevelName = fmt.Sprintf("%s-%s", priorityLevelNamePrefix, clients[i].username) framework.Logf("creating PriorityLevel %q", clients[i].priorityLevelName) - createPriorityLevel(f, clients[i].priorityLevelName, 1) + createPriorityLevel(ctx, f, clients[i].priorityLevelName, 1) clients[i].flowSchemaName = fmt.Sprintf("%s-%s", flowSchemaNamePrefix, clients[i].username) framework.Logf("creating FlowSchema %q", clients[i].flowSchemaName) - createFlowSchema(f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username}) + createFlowSchema(ctx, f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username}) ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state") - waitForSteadyState(f, clients[i].flowSchemaName, clients[i].priorityLevelName) + waitForSteadyState(ctx, f, clients[i].flowSchemaName, clients[i].priorityLevelName) } ginkgo.By("getting request concurrency from metrics") for i := range clients { - realConcurrency, err := getPriorityLevelNominalConcurrency(f.ClientSet, clients[i].priorityLevelName) + realConcurrency, err := getPriorityLevelNominalConcurrency(ctx, f.ClientSet, clients[i].priorityLevelName) framework.ExpectNoError(err) clients[i].concurrency = int32(float64(realConcurrency) * clients[i].concurrencyMultiplier) if clients[i].concurrency < 1 { @@ -189,15 +189,15 @@ var _ = SIGDescribe("API priority and fairness", func() { loadDuration := 10 * time.Second framework.Logf("creating PriorityLevel %q", priorityLevelName) - createPriorityLevel(f, priorityLevelName, 1) + createPriorityLevel(ctx, f, priorityLevelName, 1) highQPSClientName := "highqps-" + f.UniqueName lowQPSClientName := "lowqps-" + f.UniqueName framework.Logf("creating FlowSchema %q", flowSchemaName) - createFlowSchema(f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName}) + createFlowSchema(ctx, f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName}) ginkgo.By("waiting for testing flow schema and priority level to reach steady state") - waitForSteadyState(f, flowSchemaName, priorityLevelName) + waitForSteadyState(ctx, f, flowSchemaName, priorityLevelName) type client struct { username string @@ -213,7 +213,7 @@ var _ = SIGDescribe("API priority and fairness", func() { } framework.Logf("getting real concurrency") - realConcurrency, err := getPriorityLevelNominalConcurrency(f.ClientSet, priorityLevelName) + realConcurrency, err := getPriorityLevelNominalConcurrency(ctx, f.ClientSet, priorityLevelName) framework.ExpectNoError(err) for i := range clients { clients[i].concurrency = int32(float64(realConcurrency) * clients[i].concurrencyMultiplier) 
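The flow-control helpers around here obtain nominal concurrency by scraping /metrics, now with the test's ctx. The raw GET, stripped of the metric parsing (the wrapper is an assumption):

package ctxexamples

import (
	"context"

	"k8s.io/client-go/kubernetes"
)

// scrapeMetrics performs a raw GET of /metrics with a caller-supplied ctx
// instead of context.TODO(); parsing of the returned text is left out.
func scrapeMetrics(ctx context.Context, c kubernetes.Interface) (string, error) {
	raw, err := c.CoreV1().RESTClient().Get().RequestURI("/metrics").DoRaw(ctx)
	if err != nil {
		return "", err
	}
	return string(raw), nil
}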
@@ -250,9 +250,9 @@ var _ = SIGDescribe("API priority and fairness", func() { // createPriorityLevel creates a priority level with the provided assured // concurrency share. -func createPriorityLevel(f *framework.Framework, priorityLevelName string, nominalConcurrencyShares int32) *flowcontrol.PriorityLevelConfiguration { +func createPriorityLevel(ctx context.Context, f *framework.Framework, priorityLevelName string, nominalConcurrencyShares int32) *flowcontrol.PriorityLevelConfiguration { createdPriorityLevel, err := f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Create( - context.TODO(), + ctx, &flowcontrol.PriorityLevelConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: priorityLevelName, @@ -273,8 +273,8 @@ func createPriorityLevel(f *framework.Framework, priorityLevelName string, nomin return createdPriorityLevel } -func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName string) (int32, error) { - resp, err := c.CoreV1().RESTClient().Get().RequestURI("/metrics").DoRaw(context.TODO()) +func getPriorityLevelNominalConcurrency(ctx context.Context, c clientset.Interface, priorityLevelName string) (int32, error) { + resp, err := c.CoreV1().RESTClient().Get().RequestURI("/metrics").DoRaw(ctx) if err != nil { return 0, err } @@ -306,7 +306,7 @@ func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName // createFlowSchema creates a flow schema referring to a particular priority // level and matching the username provided. -func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPrecedence int32, priorityLevelName string, matchingUsernames []string) *flowcontrol.FlowSchema { +func createFlowSchema(ctx context.Context, f *framework.Framework, flowSchemaName string, matchingPrecedence int32, priorityLevelName string, matchingUsernames []string) *flowcontrol.FlowSchema { var subjects []flowcontrol.Subject for _, matchingUsername := range matchingUsernames { subjects = append(subjects, flowcontrol.Subject{ @@ -318,7 +318,7 @@ func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPre } createdFlowSchema, err := f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Create( - context.TODO(), + ctx, &flowcontrol.FlowSchema{ ObjectMeta: metav1.ObjectMeta{ Name: flowSchemaName, @@ -354,9 +354,9 @@ func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPre // created flow schema and priority level have been seen by the APF controller // by checking: (1) the dangling priority level reference condition in the flow // schema status, and (2) metrics. The function times out after 30 seconds. -func waitForSteadyState(f *framework.Framework, flowSchemaName string, priorityLevelName string) { - framework.ExpectNoError(wait.Poll(time.Second, 30*time.Second, func() (bool, error) { - fs, err := f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Get(context.TODO(), flowSchemaName, metav1.GetOptions{}) +func waitForSteadyState(ctx context.Context, f *framework.Framework, flowSchemaName string, priorityLevelName string) { + framework.ExpectNoError(wait.PollWithContext(ctx, time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { + fs, err := f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Get(ctx, flowSchemaName, metav1.GetOptions{}) if err != nil { return false, err } @@ -368,7 +368,7 @@ func waitForSteadyState(f *framework.Framework, flowSchemaName string, priorityL // hasn't been achieved. 
return false, nil } - _, err = getPriorityLevelNominalConcurrency(f.ClientSet, priorityLevelName) + _, err = getPriorityLevelNominalConcurrency(ctx, f.ClientSet, priorityLevelName) if err != nil { if err == errPriorityLevelNotFound { return false, nil diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 3b917e0cbfd..e0fb6266ca3 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -52,8 +52,8 @@ import ( // estimateMaximumPods estimates how many pods the cluster can handle // with some wiggle room, to prevent pods being unable to schedule due // to max pod constraints. -func estimateMaximumPods(c clientset.Interface, min, max int32) int32 { - nodes, err := e2enode.GetReadySchedulableNodes(c) +func estimateMaximumPods(ctx context.Context, c clientset.Interface, min, max int32) int32 { + nodes, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) availablePods := int32(0) @@ -173,13 +173,13 @@ func newGCPod(name string) *v1.Pod { // verifyRemainingObjects verifies if the number of remaining objects. // It returns error if the communication with the API server fails. -func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (bool, error) { +func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects map[string]int) (bool, error) { var ret = true for object, num := range objects { switch object { case "Pods": - pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list pods: %v", err) } @@ -188,7 +188,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items))) } case "Deployments": - deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list deployments: %v", err) } @@ -197,7 +197,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items))) } case "ReplicaSets": - rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rs: %v", err) } @@ -206,7 +206,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items))) } case "ReplicationControllers": - rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list replication controllers: %v", err) } @@ -215,7 +215,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items))) } case "CronJobs": - cronJobs, err := 
f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + cronJobs, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list cronjobs: %v", err) } @@ -224,7 +224,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items))) } case "Jobs": - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list jobs: %v", err) } @@ -240,14 +240,14 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo return ret, nil } -func gatherMetrics(f *framework.Framework) { +func gatherMetrics(ctx context.Context, f *framework.Framework) { ginkgo.By("Gathering metrics") var summary framework.TestDataSummary - grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), false, false, true, false, false, false) + grabber, err := e2emetrics.NewMetricsGrabber(ctx, f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), false, false, true, false, false, false) if err != nil { framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { - received, err := grabber.Grab() + received, err := grabber.Grab(ctx) if err != nil { framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.") } else { @@ -317,13 +317,13 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "delete_pods") rc := newOwnerRC(f, rcName, 2, uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := rcClient.Create(ctx, rc, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait for rc to create some pods if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, err := podClient.List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list pods: %v", err) } @@ -342,24 +342,24 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the rc") deleteOptions := getBackgroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) - if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(ctx, rc.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for all pods to be garbage collected") // wait for the RCs and Pods to reach the expected numbers. 
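The garbage-collector waits below move from wait.Poll to wait.PollWithContext, so both the loop and the LIST calls inside it stop when the test is aborted. A reduced sketch of that pattern (interval, timeout and the expected count are placeholders):

package ctxexamples

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodCount polls until the namespace holds exactly want pods; the
// condition receives the same ctx and uses it for every LIST.
func waitForPodCount(ctx context.Context, c kubernetes.Interface, ns string, want int) error {
	return wait.PollWithContext(ctx, 5*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pods.Items) == want, nil
	})
}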
- if err := wait.Poll(5*time.Second, (60*time.Second)+gcInformerResyncRetryTimeout, func() (bool, error) { + if err := wait.PollWithContext(ctx, 5*time.Second, (60*time.Second)+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { objects := map[string]int{"ReplicationControllers": 0, "Pods": 0} - return verifyRemainingObjects(f, objects) + return verifyRemainingObjects(ctx, f, objects) }); err != nil { framework.Failf("failed to wait for all pods to be deleted: %v", err) - remainingPods, err := podClient.List(context.TODO(), metav1.ListOptions{}) + remainingPods, err := podClient.List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("failed to list pods post mortem: %v", err) } else { framework.Failf("remaining pods are: %#v", remainingPods) } } - gatherMetrics(f) + gatherMetrics(ctx, f) }) /* @@ -373,15 +373,15 @@ var _ = SIGDescribe("Garbage collector", func() { podClient := clientSet.CoreV1().Pods(f.Namespace.Name) rcName := "simpletest.rc" uniqLabels := getUniqLabel("gctest", "orphan_pods") - rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels) + rc := newOwnerRC(f, rcName, estimateMaximumPods(ctx, clientSet, 10, 100), uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := rcClient.Create(ctx, rc, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait for rc to create pods - if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { + rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get rc: %v", err) } @@ -396,7 +396,7 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the rc") deleteOptions := getOrphanOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) - if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(ctx, rc.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for the rc to be deleted") @@ -407,8 +407,8 @@ var _ = SIGDescribe("Garbage collector", func() { // actual qps is less than 5. Also, the e2e tests are running in // parallel, the GC controller might get distracted by other tests. // According to the test logs, 120s is enough time. 
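The orphan-pods case above deletes the owner RC with orphan-style delete options and a UID precondition, again via ctx. A standalone sketch of that delete (assuming getOrphanOptions amounts to an orphan propagation policy, which the diff itself does not show):

package ctxexamples

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteRCOrphaningPods deletes a ReplicationController without cascading to
// its pods, and only if the object still carries the expected UID.
func deleteRCOrphaningPods(ctx context.Context, c kubernetes.Interface, ns, name, uid string) error {
	orphan := metav1.DeletePropagationOrphan
	opts := metav1.DeleteOptions{
		PropagationPolicy: &orphan,
		Preconditions:     metav1.NewUIDPreconditions(uid),
	}
	return c.CoreV1().ReplicationControllers(ns).Delete(ctx, name, opts)
}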
- if err := wait.Poll(5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func() (bool, error) { - rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { + rcs, err := rcClient.List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rcs: %v", err) } @@ -421,15 +421,15 @@ var _ = SIGDescribe("Garbage collector", func() { } ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods") time.Sleep(30 * time.Second) - pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, err := podClient.List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("Failed to list pods: %v", err) } if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a { framework.Failf("expect %d pods, got %d pods", e, a) } - gatherMetrics(f) - if err = e2epod.DeletePodsWithGracePeriod(clientSet, pods.Items, 0); err != nil { + gatherMetrics(ctx, f) + if err = e2epod.DeletePodsWithGracePeriod(ctx, clientSet, pods.Items, 0); err != nil { framework.Logf("WARNING: failed to delete pods: %v", err) } }) @@ -444,13 +444,13 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "orphan_pods_nil_option") rc := newOwnerRC(f, rcName, 2, uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := rcClient.Create(ctx, rc, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait for rc to create some pods - if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { + rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get rc: %v", err) } @@ -465,20 +465,20 @@ var _ = SIGDescribe("Garbage collector", func() { deleteOptions := metav1.DeleteOptions{ Preconditions: metav1.NewUIDPreconditions(string(rc.UID)), } - if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(ctx, rc.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods") time.Sleep(30 * time.Second) - pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, err := podClient.List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("Failed to list pods: %v", err) } if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a { framework.Failf("expect %d pods, got %d pods", e, a) } - gatherMetrics(f) - if err = e2epod.DeletePodsWithGracePeriod(clientSet, pods.Items, 0); err != nil { + gatherMetrics(ctx, f) + if err = e2epod.DeletePodsWithGracePeriod(ctx, clientSet, pods.Items, 0); err != nil { framework.Logf("WARNING: failed to delete pods: %v", err) } }) @@ -496,14 +496,14 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "delete_rs") deployment := newOwnerDeployment(f, deploymentName, uniqLabels) ginkgo.By("create the deployment") - createdDeployment, err := deployClient.Create(context.TODO(), deployment, metav1.CreateOptions{}) + createdDeployment, err := 
deployClient.Create(ctx, deployment, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create deployment: %v", err) } // wait for deployment to create some rs ginkgo.By("Wait for the Deployment to create new ReplicaSet") - err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { - rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{}) + err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { + rsList, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rs: %v", err) } @@ -517,18 +517,18 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the deployment") deleteOptions := getBackgroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID)) - if err := deployClient.Delete(context.TODO(), deployment.ObjectMeta.Name, deleteOptions); err != nil { + if err := deployClient.Delete(ctx, deployment.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the deployment: %v", err) } ginkgo.By("wait for all rs to be garbage collected") - err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func() (bool, error) { + err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0} - return verifyRemainingObjects(f, objects) + return verifyRemainingObjects(ctx, f, objects) }) if err != nil { errList := make([]error, 0) errList = append(errList, err) - remainingRSs, err := rsClient.List(context.TODO(), metav1.ListOptions{}) + remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err)) } else { @@ -539,7 +539,7 @@ var _ = SIGDescribe("Garbage collector", func() { } - gatherMetrics(f) + gatherMetrics(ctx, f) }) /* @@ -555,15 +555,15 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "orphan_rs") deployment := newOwnerDeployment(f, deploymentName, uniqLabels) ginkgo.By("create the deployment") - createdDeployment, err := deployClient.Create(context.TODO(), deployment, metav1.CreateOptions{}) + createdDeployment, err := deployClient.Create(ctx, deployment, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create deployment: %v", err) } // wait for deployment to create some rs ginkgo.By("Wait for the Deployment to create new ReplicaSet") var replicaset appsv1.ReplicaSet - err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { - rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{}) + err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { + rsList, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rs: %v", err) } @@ -579,8 +579,8 @@ var _ = SIGDescribe("Garbage collector", func() { } desiredGeneration := replicaset.Generation - if err := wait.PollImmediate(100*time.Millisecond, 60*time.Second, func() (bool, error) { - newRS, err := clientSet.AppsV1().ReplicaSets(replicaset.Namespace).Get(context.TODO(), replicaset.Name, metav1.GetOptions{}) + if err := wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 60*time.Second, func(ctx context.Context) (bool, 
error) { + newRS, err := clientSet.AppsV1().ReplicaSets(replicaset.Namespace).Get(ctx, replicaset.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -592,12 +592,12 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the deployment") deleteOptions := getOrphanOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID)) - if err := deployClient.Delete(context.TODO(), deployment.ObjectMeta.Name, deleteOptions); err != nil { + if err := deployClient.Delete(ctx, deployment.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the deployment: %v", err) } ginkgo.By("wait for deployment deletion to see if the garbage collector mistakenly deletes the rs") - err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func() (bool, error) { - dList, err := deployClient.List(context.TODO(), metav1.ListOptions{}) + err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { + dList, err := deployClient.List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list deployments: %v", err) } @@ -608,19 +608,19 @@ var _ = SIGDescribe("Garbage collector", func() { } // Once the deployment object is gone, we'll know the GC has finished performing any relevant actions. objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2} - ok, err := verifyRemainingObjects(f, objects) + ok, err := verifyRemainingObjects(ctx, f, objects) if err != nil { framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err) } if !ok { errList := make([]error, 0) - remainingRSs, err := rsClient.List(context.TODO(), metav1.ListOptions{}) + remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err)) } else { errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs)) } - remainingDSs, err := deployClient.List(context.TODO(), metav1.ListOptions{}) + remainingDSs, err := deployClient.List(ctx, metav1.ListOptions{}) if err != nil { errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err)) } else { @@ -629,7 +629,7 @@ var _ = SIGDescribe("Garbage collector", func() { aggregatedError := utilerrors.NewAggregate(errList) framework.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError) } - rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("Failed to list ReplicaSet %v", err) } @@ -639,7 +639,7 @@ var _ = SIGDescribe("Garbage collector", func() { } } - gatherMetrics(f) + gatherMetrics(ctx, f) }) /* @@ -653,15 +653,15 @@ var _ = SIGDescribe("Garbage collector", func() { podClient := clientSet.CoreV1().Pods(f.Namespace.Name) rcName := "simpletest.rc" uniqLabels := getUniqLabel("gctest", "delete_pods_foreground") - rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels) + rc := newOwnerRC(f, rcName, estimateMaximumPods(ctx, clientSet, 10, 100), uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := rcClient.Create(ctx, rc, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication 
controller: %v", err) } // wait for rc to create pods - if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { + rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get rc: %v", err) } @@ -675,7 +675,7 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the rc") deleteOptions := getForegroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) - if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(ctx, rc.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for the rc to be deleted") @@ -684,10 +684,10 @@ var _ = SIGDescribe("Garbage collector", func() { // owner deletion, but in practice there can be a long delay between owner // deletion and dependent deletion processing. For now, increase the timeout // and investigate the processing delay. - if err := wait.Poll(1*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, func() (bool, error) { - _, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { + _, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{}) if err == nil { - pods, _ := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, _ := podClient.List(ctx, metav1.ListOptions{}) framework.Logf("%d pods remaining", len(pods.Items)) count := 0 for _, pod := range pods.Items { @@ -704,7 +704,7 @@ var _ = SIGDescribe("Garbage collector", func() { } return false, err }); err != nil { - pods, err2 := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, err2 := podClient.List(ctx, metav1.ListOptions{}) if err2 != nil { framework.Failf("%v", err2) } @@ -716,14 +716,14 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to delete the rc: %v", err) } // There shouldn't be any pods - pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, err := podClient.List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("%v", err) } if len(pods.Items) != 0 { framework.Failf("expected no pods, got %#v", pods) } - gatherMetrics(f) + gatherMetrics(ctx, f) }) // TODO: this should be an integration test @@ -737,12 +737,12 @@ var _ = SIGDescribe("Garbage collector", func() { rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name) rc1Name := "simpletest-rc-to-be-deleted" - replicas := estimateMaximumPods(clientSet, 10, 100) + replicas := estimateMaximumPods(ctx, clientSet, 10, 100) halfReplicas := int(replicas / 2) uniqLabelsDeleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d") rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabelsDeleted) ginkgo.By("create the rc1") - rc1, err := rcClient.Create(context.TODO(), rc1, metav1.CreateOptions{}) + rc1, err := rcClient.Create(ctx, rc1, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } @@ -750,13 +750,13 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s") rc2 := newOwnerRC(f, rc2Name, 0, uniqLabelsStay) 
ginkgo.By("create the rc2") - rc2, err = rcClient.Create(context.TODO(), rc2, metav1.CreateOptions{}) + rc2, err = rcClient.Create(ctx, rc2, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait for rc1 to be stable - if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rc1, err := rcClient.Get(context.TODO(), rc1.Name, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { + rc1, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get rc: %v", err) } @@ -768,28 +768,28 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err) } ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name)) - pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, err := podClient.List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list pods in namespace: %s", f.Namespace.Name) patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID) for i := 0; i < halfReplicas; i++ { pod := pods.Items[i] - _, err := podClient.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) + _, err := podClient.Patch(ctx, pod.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch) } ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name)) deleteOptions := getForegroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID)) - if err := rcClient.Delete(context.TODO(), rc1.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(ctx, rc1.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for the rc to be deleted") // TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient. // Tracked at https://github.com/kubernetes/kubernetes/issues/50046. 
- if err := wait.Poll(5*time.Second, 90*time.Second, func() (bool, error) { - _, err := rcClient.Get(context.TODO(), rc1.Name, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second, func(ctx context.Context) (bool, error) { + _, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{}) if err == nil { - pods, _ := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, _ := podClient.List(ctx, metav1.ListOptions{}) framework.Logf("%d pods remaining", len(pods.Items)) count := 0 for _, pod := range pods.Items { @@ -806,7 +806,7 @@ var _ = SIGDescribe("Garbage collector", func() { } return false, err }); err != nil { - pods, err2 := podClient.List(context.TODO(), metav1.ListOptions{}) + pods, err2 := podClient.List(ctx, metav1.ListOptions{}) if err2 != nil { framework.Failf("%v", err2) } @@ -818,7 +818,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to delete rc %s, err: %v", rc1Name, err) } // half of the pods should still exist, - pods, err = podClient.List(context.TODO(), metav1.ListOptions{}) + pods, err = podClient.List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("%v", err) } @@ -834,8 +834,8 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("expected pod to only have 1 owner, got %#v", pod.ObjectMeta.OwnerReferences) } } - gatherMetrics(f) - if err = e2epod.DeletePodsWithGracePeriod(clientSet, pods.Items, 0); err != nil { + gatherMetrics(ctx, f) + if err = e2epod.DeletePodsWithGracePeriod(ctx, clientSet, pods.Items, 0); err != nil { framework.Logf("WARNING: failed to delete pods: %v", err) } }) @@ -851,43 +851,43 @@ var _ = SIGDescribe("Garbage collector", func() { podClient := clientSet.CoreV1().Pods(f.Namespace.Name) pod1Name := "pod1" pod1 := newGCPod(pod1Name) - pod1, err := podClient.Create(context.TODO(), pod1, metav1.CreateOptions{}) + pod1, err := podClient.Create(ctx, pod1, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name) pod2Name := "pod2" pod2 := newGCPod(pod2Name) - pod2, err = podClient.Create(context.TODO(), pod2, metav1.CreateOptions{}) + pod2, err = podClient.Create(ctx, pod2, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name) pod3Name := "pod3" pod3 := newGCPod(pod3Name) - pod3, err = podClient.Create(context.TODO(), pod3, metav1.CreateOptions{}) + pod3, err = podClient.Create(ctx, pod3, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name) // create circular dependency addRefPatch := func(name string, uid types.UID) []byte { return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid)) } patch1 := addRefPatch(pod3.Name, pod3.UID) - pod1, err = podClient.Patch(context.TODO(), pod1.Name, types.StrategicMergePatchType, patch1, metav1.PatchOptions{}) + pod1, err = podClient.Patch(ctx, pod1.Name, types.StrategicMergePatchType, patch1, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1) framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences) patch2 := addRefPatch(pod1.Name, pod1.UID) - pod2, err = podClient.Patch(context.TODO(), pod2.Name, types.StrategicMergePatchType, patch2, metav1.PatchOptions{}) + 
pod2, err = podClient.Patch(ctx, pod2.Name, types.StrategicMergePatchType, patch2, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2) framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences) patch3 := addRefPatch(pod2.Name, pod2.UID) - pod3, err = podClient.Patch(context.TODO(), pod3.Name, types.StrategicMergePatchType, patch3, metav1.PatchOptions{}) + pod3, err = podClient.Patch(ctx, pod3.Name, types.StrategicMergePatchType, patch3, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3) framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences) // delete one pod, should result in the deletion of all pods deleteOptions := getForegroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID)) - err = podClient.Delete(context.TODO(), pod1.ObjectMeta.Name, deleteOptions) + err = podClient.Delete(ctx, pod1.ObjectMeta.Name, deleteOptions) framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name) var pods *v1.PodList var err2 error // TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient. // Tracked at https://github.com/kubernetes/kubernetes/issues/50046. - if err := wait.Poll(5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func() (bool, error) { - pods, err2 = podClient.List(context.TODO(), metav1.ListOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { + pods, err2 = podClient.List(ctx, metav1.ListOptions{}) if err2 != nil { return false, fmt.Errorf("failed to list pods: %v", err) } @@ -946,7 +946,7 @@ var _ = SIGDescribe("Garbage collector", func() { }, }, } - persistedOwner, err := resourceClient.Create(context.TODO(), owner, metav1.CreateOptions{}) + persistedOwner, err := resourceClient.Create(ctx, owner, metav1.CreateOptions{}) if err != nil { framework.Failf("failed to create owner resource %q: %v", ownerName, err) } @@ -971,7 +971,7 @@ var _ = SIGDescribe("Garbage collector", func() { }, }, } - persistedDependent, err := resourceClient.Create(context.TODO(), dependent, metav1.CreateOptions{}) + persistedDependent, err := resourceClient.Create(ctx, dependent, metav1.CreateOptions{}) if err != nil { framework.Failf("failed to create dependent resource %q: %v", dependentName, err) } @@ -979,7 +979,7 @@ var _ = SIGDescribe("Garbage collector", func() { // Delete the owner. 
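addRefPatch above serializes an owner reference with controller and blockOwnerDeletion set; expressed as a typed value the same reference looks roughly like this (sketch; the helper name circularOwnerRef is hypothetical):

// blockOwnerDeletion=true makes a foreground delete of the owner wait for
// this dependent, which is what lets deleting pod1 cascade through the
// circular ownership among pod1, pod2 and pod3 created above.
func circularOwnerRef(owner *v1.Pod) metav1.OwnerReference {
	isController := true
	block := true
	return metav1.OwnerReference{
		APIVersion:         "v1",
		Kind:               "Pod",
		Name:               owner.Name,
		UID:                owner.UID,
		Controller:         &isController,
		BlockOwnerDeletion: &block,
	}
}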
background := metav1.DeletePropagationBackground - err = resourceClient.Delete(context.TODO(), ownerName, metav1.DeleteOptions{PropagationPolicy: &background}) + err = resourceClient.Delete(ctx, ownerName, metav1.DeleteOptions{PropagationPolicy: &background}) if err != nil { framework.Failf("failed to delete owner resource %q: %v", ownerName, err) } @@ -993,20 +993,20 @@ var _ = SIGDescribe("Garbage collector", func() { "kind": definition.Spec.Names.Kind, "metadata": map[string]interface{}{"name": canaryName}}, } - _, err = resourceClient.Create(context.TODO(), canary, metav1.CreateOptions{}) + _, err = resourceClient.Create(ctx, canary, metav1.CreateOptions{}) if err != nil { framework.Failf("failed to create canary resource %q: %v", canaryName, err) } framework.Logf("created canary resource %q", canaryName) foreground := metav1.DeletePropagationForeground - err = resourceClient.Delete(context.TODO(), canaryName, metav1.DeleteOptions{PropagationPolicy: &foreground}) + err = resourceClient.Delete(ctx, canaryName, metav1.DeleteOptions{PropagationPolicy: &foreground}) if err != nil { framework.Failf("failed to delete canary resource %q: %v", canaryName, err) } // Wait for the canary foreground finalization to complete, which means GC is aware of our new custom resource type var lastCanary *unstructured.Unstructured - if err := wait.PollImmediate(5*time.Second, 3*time.Minute, func() (bool, error) { - lastCanary, err = resourceClient.Get(context.TODO(), dependentName, metav1.GetOptions{}) + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, 3*time.Minute, func(ctx context.Context) (bool, error) { + lastCanary, err = resourceClient.Get(ctx, dependentName, metav1.GetOptions{}) return apierrors.IsNotFound(err), nil }); err != nil { framework.Logf("canary last state: %#v", lastCanary) @@ -1016,8 +1016,8 @@ var _ = SIGDescribe("Garbage collector", func() { // Ensure the dependent is deleted. var lastDependent *unstructured.Unstructured var err2 error - if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { - lastDependent, err2 = resourceClient.Get(context.TODO(), dependentName, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) { + lastDependent, err2 = resourceClient.Get(ctx, dependentName, metav1.GetOptions{}) return apierrors.IsNotFound(err2), nil }); err != nil { framework.Logf("owner: %#v", persistedOwner) @@ -1027,7 +1027,7 @@ var _ = SIGDescribe("Garbage collector", func() { } // Ensure the owner is deleted. 
- _, err = resourceClient.Get(context.TODO(), ownerName, metav1.GetOptions{}) + _, err = resourceClient.Get(ctx, ownerName, metav1.GetOptions{}) if err == nil { framework.Failf("expected owner resource %q to be deleted", ownerName) } else { @@ -1081,7 +1081,7 @@ var _ = SIGDescribe("Garbage collector", func() { }, }, } - persistedOwner, err := resourceClient.Create(context.TODO(), owner, metav1.CreateOptions{}) + persistedOwner, err := resourceClient.Create(ctx, owner, metav1.CreateOptions{}) if err != nil { framework.Failf("failed to create owner resource %q: %v", ownerName, err) } @@ -1106,21 +1106,21 @@ var _ = SIGDescribe("Garbage collector", func() { }, }, } - _, err = resourceClient.Create(context.TODO(), dependent, metav1.CreateOptions{}) + _, err = resourceClient.Create(ctx, dependent, metav1.CreateOptions{}) if err != nil { framework.Failf("failed to create dependent resource %q: %v", dependentName, err) } framework.Logf("created dependent resource %q", dependentName) // Delete the owner and orphan the dependent. - err = resourceClient.Delete(context.TODO(), ownerName, getOrphanOptions()) + err = resourceClient.Delete(ctx, ownerName, getOrphanOptions()) if err != nil { framework.Failf("failed to delete owner resource %q: %v", ownerName, err) } ginkgo.By("wait for the owner to be deleted") - if err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) { - _, err = resourceClient.Get(context.TODO(), ownerName, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second, func(ctx context.Context) (bool, error) { + _, err = resourceClient.Get(ctx, ownerName, metav1.GetOptions{}) if err == nil { return false, nil } @@ -1134,8 +1134,8 @@ var _ = SIGDescribe("Garbage collector", func() { // Wait 30s and ensure the dependent is not deleted. 
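getOrphanOptions is defined elsewhere in this file; for the dynamic resourceClient, the orphaning delete it performs amounts to roughly the following (sketch):

orphan := metav1.DeletePropagationOrphan
err := resourceClient.Delete(ctx, ownerName, metav1.DeleteOptions{PropagationPolicy: &orphan})
framework.ExpectNoError(err, "failed to delete owner %q with orphan propagation", ownerName)
// With Orphan propagation the garbage collector strips the ownerReference
// from the dependent instead of deleting it, which is what the 30-second
// check that follows is verifying.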
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the dependent crd") - if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - _, err := resourceClient.Get(context.TODO(), dependentName, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { + _, err := resourceClient.Get(ctx, dependentName, metav1.GetOptions{}) return false, err }); err != nil && err != wait.ErrWaitTimeout { framework.Failf("failed to ensure the dependent is not deleted: %v", err) @@ -1146,12 +1146,12 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("Create the cronjob") cronJob := newCronJob("simple", "*/1 * * * ?") - cronJob, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).Create(context.TODO(), cronJob, metav1.CreateOptions{}) + cronJob, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).Create(ctx, cronJob, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name) ginkgo.By("Wait for the CronJob to create new Job") - err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) { - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 2*time.Minute, func(ctx context.Context) (bool, error) { + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list jobs: %v", err) } @@ -1162,18 +1162,18 @@ var _ = SIGDescribe("Garbage collector", func() { } ginkgo.By("Delete the cronjob") - if err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).Delete(context.TODO(), cronJob.Name, getBackgroundOptions()); err != nil { + if err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).Delete(ctx, cronJob.Name, getBackgroundOptions()); err != nil { framework.Failf("Failed to delete the CronJob: %v", err) } ginkgo.By("Verify if cronjob does not leave jobs nor pods behind") - err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { + err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0} - return verifyRemainingObjects(f, objects) + return verifyRemainingObjects(ctx, f, objects) }) if err != nil { framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err) } - gatherMetrics(f) + gatherMetrics(ctx, f) }) }) diff --git a/test/e2e/apimachinery/generated_clientset.go b/test/e2e/apimachinery/generated_clientset.go index 7b5726c42b6..113a5bba3ad 100644 --- a/test/e2e/apimachinery/generated_clientset.go +++ b/test/e2e/apimachinery/generated_clientset.go @@ -112,7 +112,7 @@ var _ = SIGDescribe("Generated clientset", func() { ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String() options := metav1.ListOptions{LabelSelector: selector} - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) if err != nil { framework.Failf("Failed to query for pods: %v", err) } @@ -121,13 +121,13 @@ var _ = SIGDescribe("Generated clientset", func() { LabelSelector: selector, ResourceVersion: pods.ListMeta.ResourceVersion, } - w, err := podClient.Watch(context.TODO(), options) + w, err := podClient.Watch(ctx, options) if err != nil { 
framework.Failf("Failed to set up watch: %v", err) } ginkgo.By("creating the pod") - pod, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = podClient.Create(ctx, pod, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create pod: %v", err) } @@ -137,7 +137,7 @@ var _ = SIGDescribe("Generated clientset", func() { LabelSelector: selector, ResourceVersion: pod.ResourceVersion, } - pods, err = podClient.List(context.TODO(), options) + pods, err = podClient.List(ctx, options) if err != nil { framework.Failf("Failed to query for pods: %v", err) } @@ -148,11 +148,11 @@ var _ = SIGDescribe("Generated clientset", func() { // We need to wait for the pod to be scheduled, otherwise the deletion // will be carried out immediately rather than gracefully. - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) ginkgo.By("deleting the pod gracefully") gracePeriod := int64(31) - if err := podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(gracePeriod)); err != nil { + if err := podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(gracePeriod)); err != nil { framework.Failf("Failed to delete pod: %v", err) } @@ -225,7 +225,7 @@ var _ = SIGDescribe("Generated clientset", func() { ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String() options := metav1.ListOptions{LabelSelector: selector} - cronJobs, err := cronJobClient.List(context.TODO(), options) + cronJobs, err := cronJobClient.List(ctx, options) if err != nil { framework.Failf("Failed to query for cronJobs: %v", err) } @@ -234,13 +234,13 @@ var _ = SIGDescribe("Generated clientset", func() { LabelSelector: selector, ResourceVersion: cronJobs.ListMeta.ResourceVersion, } - w, err := cronJobClient.Watch(context.TODO(), options) + w, err := cronJobClient.Watch(ctx, options) if err != nil { framework.Failf("Failed to set up watch: %v", err) } ginkgo.By("creating the cronJob") - cronJob, err = cronJobClient.Create(context.TODO(), cronJob, metav1.CreateOptions{}) + cronJob, err = cronJobClient.Create(ctx, cronJob, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create cronJob: %v", err) } @@ -250,7 +250,7 @@ var _ = SIGDescribe("Generated clientset", func() { LabelSelector: selector, ResourceVersion: cronJob.ResourceVersion, } - cronJobs, err = cronJobClient.List(context.TODO(), options) + cronJobs, err = cronJobClient.List(ctx, options) if err != nil { framework.Failf("Failed to query for cronJobs: %v", err) } @@ -262,12 +262,12 @@ var _ = SIGDescribe("Generated clientset", func() { ginkgo.By("deleting the cronJob") // Use DeletePropagationBackground so the CronJob is really gone when the call returns. 
propagationPolicy := metav1.DeletePropagationBackground - if err := cronJobClient.Delete(context.TODO(), cronJob.Name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { + if err := cronJobClient.Delete(ctx, cronJob.Name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { framework.Failf("Failed to delete cronJob: %v", err) } options = metav1.ListOptions{LabelSelector: selector} - cronJobs, err = cronJobClient.List(context.TODO(), options) + cronJobs, err = cronJobClient.List(ctx, options) if err != nil { framework.Failf("Failed to list cronJobs to verify deletion: %v", err) } diff --git a/test/e2e/apimachinery/health_handlers.go b/test/e2e/apimachinery/health_handlers.go index 2cb1bce7f14..7254519a51e 100644 --- a/test/e2e/apimachinery/health_handlers.go +++ b/test/e2e/apimachinery/health_handlers.go @@ -93,10 +93,10 @@ var ( ) ) -func testPath(client clientset.Interface, path string, requiredChecks sets.String) error { +func testPath(ctx context.Context, client clientset.Interface, path string, requiredChecks sets.String) error { var result restclient.Result err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { - result = client.CoreV1().RESTClient().Get().RequestURI(path).Do(context.TODO()) + result = client.CoreV1().RESTClient().Get().RequestURI(path).Do(ctx) status := 0 result.StatusCode(&status) return status == 200, nil @@ -121,15 +121,15 @@ var _ = SIGDescribe("health handlers", func() { ginkgo.It("should contain necessary checks", func(ctx context.Context) { ginkgo.By("/health") - err := testPath(f.ClientSet, "/healthz?verbose=1", requiredHealthzChecks) + err := testPath(ctx, f.ClientSet, "/healthz?verbose=1", requiredHealthzChecks) framework.ExpectNoError(err) ginkgo.By("/livez") - err = testPath(f.ClientSet, "/livez?verbose=1", requiredLivezChecks) + err = testPath(ctx, f.ClientSet, "/livez?verbose=1", requiredLivezChecks) framework.ExpectNoError(err) ginkgo.By("/readyz") - err = testPath(f.ClientSet, "/readyz?verbose=1", requiredReadyzChecks) + err = testPath(ctx, f.ClientSet, "/readyz?verbose=1", requiredReadyzChecks) framework.ExpectNoError(err) }) }) diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index cea4b13419b..7930e9b60fb 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -44,7 +44,7 @@ import ( "k8s.io/apimachinery/pkg/types" ) -func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) { +func extinguish(ctx context.Context, f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) { ginkgo.By("Creating testing namespaces") wg := &sync.WaitGroup{} wg.Add(totalNS) @@ -53,7 +53,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max defer wg.Done() defer ginkgo.GinkgoRecover() ns := fmt.Sprintf("nslifetest-%v", n) - _, err := f.CreateNamespace(ns, nil) + _, err := f.CreateNamespace(ctx, ns, nil) framework.ExpectNoError(err, "failed to create namespace: %s", ns) }(n) } @@ -63,7 +63,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max ginkgo.By("Waiting 10 seconds") time.Sleep(10 * time.Second) deleteFilter := []string{"nslifetest"} - deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */) + deleted, err := framework.DeleteNamespaces(ctx, f.ClientSet, deleteFilter, nil /* skipFilter */) framework.ExpectNoError(err, "failed to delete namespace(s) containing: %s", 
deleteFilter) framework.ExpectEqual(len(deleted), totalNS) @@ -72,7 +72,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, func() (bool, error) { var cnt = 0 - nsList, err := f.ClientSet.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) + nsList, err := f.ClientSet.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -89,14 +89,14 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max })) } -func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { +func ensurePodsAreRemovedWhenNamespaceIsDeleted(ctx context.Context, f *framework.Framework) { ginkgo.By("Creating a test namespace") namespaceName := "nsdeletetest" - namespace, err := f.CreateNamespace(namespaceName, nil) + namespace, err := f.CreateNamespace(ctx, namespaceName, nil) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Waiting for a default service account to be provisioned in namespace") - err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) + err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name) framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name) ginkgo.By("Creating a pod in the namespace") @@ -114,21 +114,21 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }, }, } - pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name) ginkgo.By("Waiting for the pod to have running status") - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)) ginkgo.By("Deleting the namespace") - err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Namespaces().Delete(ctx, namespace.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) ginkgo.By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { - _, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { return true, nil } @@ -136,24 +136,24 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { })) ginkgo.By("Recreating the namespace") - namespace, err = f.CreateNamespace(namespaceName, nil) + namespace, err = f.CreateNamespace(ctx, namespaceName, nil) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Verifying there are no pods in the namespace") - _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectError(err, "failed to get 
pod %s in namespace: %s", pod.Name, namespace.Name) } -func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { +func ensureServicesAreRemovedWhenNamespaceIsDeleted(ctx context.Context, f *framework.Framework) { var err error ginkgo.By("Creating a test namespace") namespaceName := "nsdeletetest" - namespace, err := f.CreateNamespace(namespaceName, nil) + namespace, err := f.CreateNamespace(ctx, namespaceName, nil) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Waiting for a default service account to be provisioned in namespace") - err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) + err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name) framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name) ginkgo.By("Creating a service in the namespace") @@ -174,18 +174,18 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }}, }, } - service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(context.TODO(), service, metav1.CreateOptions{}) + service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(ctx, service, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name) ginkgo.By("Deleting the namespace") - err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Namespaces().Delete(ctx, namespace.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) ginkgo.By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { - _, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { return true, nil } @@ -193,11 +193,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { })) ginkgo.By("Recreating the namespace") - namespace, err = f.CreateNamespace(namespaceName, nil) + namespace, err = f.CreateNamespace(ctx, namespaceName, nil) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Verifying there is no service in the namespace") - _, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(context.TODO(), service.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(ctx, service.Name, metav1.GetOptions{}) framework.ExpectError(err, "failed to get service %s in namespace: %s", service.Name, namespace.Name) } @@ -240,23 +240,27 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { Testname: namespace-deletion-removes-pods Description: Ensure that if a namespace is deleted then all pods are removed from that namespace. 
*/ - framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted", - func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) }) + framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted", func(ctx context.Context) { + ensurePodsAreRemovedWhenNamespaceIsDeleted(ctx, f) + }) /* Release: v1.11 Testname: namespace-deletion-removes-services Description: Ensure that if a namespace is deleted then all services are removed from that namespace. */ - framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted", - func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) }) + framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted", func(ctx context.Context) { + ensureServicesAreRemovedWhenNamespaceIsDeleted(ctx, f) + }) - ginkgo.It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)", - func() { extinguish(f, 100, 10, 150) }) + ginkgo.It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)", func(ctx context.Context) { + extinguish(ctx, f, 100, 10, 150) + }) // On hold until etcd3; see #7372 - ginkgo.It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]", - func() { extinguish(f, 100, 0, 150) }) + ginkgo.It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]", func(ctx context.Context) { + extinguish(ctx, f, 100, 0, 150) + }) /* Release: v1.18 @@ -268,7 +272,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { framework.ConformanceIt("should patch a Namespace", func(ctx context.Context) { ginkgo.By("creating a Namespace") namespaceName := "nspatchtest-" + string(uuid.NewUUID()) - ns, err := f.CreateNamespace(namespaceName, nil) + ns, err := f.CreateNamespace(ctx, namespaceName, nil) framework.ExpectNoError(err, "failed creating Namespace") namespaceName = ns.ObjectMeta.Name @@ -279,11 +283,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { }, }) framework.ExpectNoError(err, "failed to marshal JSON patch data") - _, err = f.ClientSet.CoreV1().Namespaces().Patch(context.TODO(), namespaceName, types.StrategicMergePatchType, nspatch, metav1.PatchOptions{}) + _, err = f.ClientSet.CoreV1().Namespaces().Patch(ctx, namespaceName, types.StrategicMergePatchType, nspatch, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch Namespace") ginkgo.By("get the Namespace and ensuring it has the label") - namespace, err := f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) + namespace, err := f.ClientSet.CoreV1().Namespaces().Get(ctx, namespaceName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get Namespace") framework.ExpectEqual(namespace.ObjectMeta.Labels["testLabel"], "testValue", "namespace not patched") }) @@ -304,7 +308,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { ginkgo.By("Read namespace status") - unstruct, err := dc.Resource(nsResource).Get(context.TODO(), ns, metav1.GetOptions{}, "status") + unstruct, err := dc.Resource(nsResource).Get(ctx, ns, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "failed to fetch NamespaceStatus %s", ns) nsStatus, err := unstructuredToNamespace(unstruct) framework.ExpectNoError(err, "Getting the status of the namespace %s", ns) @@ -322,7 +326,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { nsConditionJSON, err := json.Marshal(nsCondition) framework.ExpectNoError(err, 
"failed to marshal namespace condition") - patchedStatus, err := nsClient.Patch(context.TODO(), ns, types.MergePatchType, + patchedStatus, err := nsClient.Patch(ctx, ns, types.MergePatchType, []byte(`{"metadata":{"annotations":{"e2e-patched-ns-status":"`+ns+`"}},"status":{"conditions":[`+string(nsConditionJSON)+`]}}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Failed to patch status. err: %v ", err) @@ -335,7 +339,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { var statusUpdated *v1.Namespace err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - unstruct, err := dc.Resource(nsResource).Get(context.TODO(), ns, metav1.GetOptions{}, "status") + unstruct, err := dc.Resource(nsResource).Get(ctx, ns, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "failed to fetch NamespaceStatus %s", ns) statusToUpdate, err := unstructuredToNamespace(unstruct) framework.ExpectNoError(err, "Getting the status of the namespace %s", ns) @@ -346,7 +350,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { Reason: "E2E", Message: "Updated by an e2e test", }) - statusUpdated, err = nsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + statusUpdated, err = nsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) @@ -371,11 +375,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { ginkgo.By(fmt.Sprintf("Updating Namespace %q", ns)) err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - updatedNamespace, err = cs.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{}) + updatedNamespace, err = cs.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get Namespace %q", ns) updatedNamespace.Labels[ns] = "updated" - updatedNamespace, err = cs.CoreV1().Namespaces().Update(context.TODO(), updatedNamespace, metav1.UpdateOptions{}) + updatedNamespace, err = cs.CoreV1().Namespaces().Update(ctx, updatedNamespace, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "failed to update Namespace: %q", ns) @@ -398,7 +402,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { nsName := "e2e-ns-" + utilrand.String(5) ginkgo.By(fmt.Sprintf("Creating namespace %q", nsName)) - testNamespace, err := f.CreateNamespace(nsName, nil) + testNamespace, err := f.CreateNamespace(ctx, nsName, nil) framework.ExpectNoError(err, "failed creating Namespace") ns := testNamespace.ObjectMeta.Name nsClient := f.ClientSet.CoreV1().Namespaces() @@ -406,11 +410,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { ginkgo.By(fmt.Sprintf("Adding e2e finalizer to namespace %q", ns)) err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - updateNamespace, err := nsClient.Get(context.TODO(), ns, metav1.GetOptions{}) + updateNamespace, err := nsClient.Get(ctx, ns, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get Namespace %q", ns) updateNamespace.Spec.Finalizers = append(updateNamespace.Spec.Finalizers, fakeFinalizer) - updatedNamespace, err = nsClient.Finalize(context.TODO(), updateNamespace, metav1.UpdateOptions{}) + updatedNamespace, err = nsClient.Finalize(ctx, updateNamespace, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "failed to add finalizer to the namespace: %q", ns) @@ -427,7 +431,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { ginkgo.By(fmt.Sprintf("Removing e2e finalizer from namespace %q", ns)) err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - updatedNamespace, err = 
nsClient.Get(context.TODO(), ns, metav1.GetOptions{}) + updatedNamespace, err = nsClient.Get(ctx, ns, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get namespace %q", ns) var finalizerList []v1.FinalizerName @@ -437,7 +441,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { } } updatedNamespace.Spec.Finalizers = finalizerList - updatedNamespace, err = nsClient.Finalize(context.TODO(), updatedNamespace, metav1.UpdateOptions{}) + updatedNamespace, err = nsClient.Finalize(ctx, updatedNamespace, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "failed to remove finalizer from namespace: %q", ns) diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index d1434339631..0ceb48a34c0 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -74,19 +74,19 @@ var _ = SIGDescribe("ResourceQuota", func() { */ framework.ConformanceIt("should create a ResourceQuota and ensure its status is promptly calculated.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -99,34 +99,34 @@ var _ = SIGDescribe("ResourceQuota", func() { */ framework.ConformanceIt("should create a ResourceQuota and capture the life of a service.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a Service") service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP, false) - service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), service, metav1.CreateOptions{}) + service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, service, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating a NodePort Service") nodeport := newTestServiceForQuota("test-service-np", v1.ServiceTypeNodePort, false) - nodeport, err = 
f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), nodeport, metav1.CreateOptions{}) + nodeport, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, nodeport, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Not allowing a LoadBalancer Service with NodePort to be created that exceeds remaining quota") loadbalancer := newTestServiceForQuota("test-service-lb", v1.ServiceTypeLoadBalancer, true) - _, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), loadbalancer, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, loadbalancer, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Ensuring resource quota status captures service creation") @@ -134,19 +134,19 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) usedResources[v1.ResourceServices] = resource.MustParse("2") usedResources[v1.ResourceServicesNodePorts] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting Services") - err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), service.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, service.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), nodeport.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, nodeport.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") usedResources[v1.ResourceServices] = resource.MustParse("0") usedResources[v1.ResourceServicesNodePorts] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -162,8 +162,8 @@ var _ = SIGDescribe("ResourceQuota", func() { found, unchanged := 0, 0 // On contended servers the service account controller can slow down, leading to the count changing during a run. // Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely. 
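countResourceQuota, createResourceQuota and waitForResourceQuota now take ctx as their first argument. Their bodies are not part of this diff; a plausible ctx-aware shape for waitForResourceQuota, with illustrative interval and timeout and assuming the usual clientset (k8s.io/client-go/kubernetes) import, would be:

func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
	return wait.PollWithContext(ctx, 2*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
		rq, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Only compare the resources the caller asked about.
		for name, want := range used {
			got, ok := rq.Status.Used[name]
			if !ok || got.Cmp(want) != 0 {
				return false, nil
			}
		}
		return true, nil
	})
}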
- err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { + secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) if len(secrets.Items) == found { // loop until the number of secrets has stabilized for 5 seconds @@ -179,26 +179,26 @@ var _ = SIGDescribe("ResourceQuota", func() { hardSecrets := fmt.Sprintf("%d", found+1) ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) resourceQuota.Spec.Hard[v1.ResourceSecrets] = resource.MustParse(hardSecrets) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets) - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a Secret") secret := newTestSecretForQuota("test-secret") - secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures secret creation") @@ -206,16 +206,16 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceSecrets] = resource.MustParse(hardSecrets) // we expect there to be two secrets because each namespace will receive // a service account token secret by default - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting a secret") - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets) - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -230,19 +230,19 @@ var _ = SIGDescribe("ResourceQuota", func() { */ framework.ConformanceIt("should create a ResourceQuota and capture the life of a pod.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a 
ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a Pod that fits quota") @@ -255,7 +255,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") pod := newTestPodForQuota(f, podName, requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) podToUpdate := pod @@ -266,7 +266,7 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory] usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage] usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = requests[v1.ResourceName(extendedResourceName)] - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Not allowing a pod to be created that exceeds remaining quota") @@ -274,7 +274,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceCPU] = resource.MustParse("600m") requests[v1.ResourceMemory] = resource.MustParse("100Mi") pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)") @@ -286,7 +286,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Ensuring a pod cannot update its resource requirements") @@ -296,15 +296,15 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceMemory] = resource.MustParse("100Mi") requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi") podToUpdate.Spec.Containers[0].Resources.Requests = requests - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), podToUpdate, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, podToUpdate, metav1.UpdateOptions{}) framework.ExpectError(err) ginkgo.By("Ensuring attempts 
to update pod resource requirements did not change quota usage") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -314,7 +314,7 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceMemory] = resource.MustParse("0") usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0") usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) /* @@ -328,8 +328,8 @@ var _ = SIGDescribe("ResourceQuota", func() { found, unchanged := 0, 0 // On contended servers the service account controller can slow down, leading to the count changing during a run. // Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely. - err := wait.Poll(1*time.Second, time.Minute, func() (bool, error) { - configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + err := wait.PollWithContext(ctx, 1*time.Second, time.Minute, func(ctx context.Context) (bool, error) { + configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) if len(configmaps.Items) == found { // loop until the number of configmaps has stabilized for 15 seconds @@ -345,42 +345,42 @@ var _ = SIGDescribe("ResourceQuota", func() { hardConfigMaps := fmt.Sprintf("%d", found+1) ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) resourceQuota.Spec.Hard[v1.ResourceConfigMaps] = resource.MustParse(hardConfigMaps) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps) - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a ConfigMap") configMap := newTestConfigMapForQuota("test-configmap") - configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) + configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures 
configMap creation") usedResources = v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) usedResources[v1.ResourceConfigMaps] = resource.MustParse(hardConfigMaps) - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting a ConfigMap") - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMap.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps) - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -393,31 +393,31 @@ var _ = SIGDescribe("ResourceQuota", func() { */ framework.ConformanceIt("should create a ResourceQuota and capture the life of a replication controller.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a ReplicationController") replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0) - replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), replicationController, metav1.CreateOptions{}) + replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, replicationController, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures replication controller creation") usedResources = v1.ResourceList{} usedResources[v1.ResourceReplicationControllers] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting a ReplicationController") @@ -426,7 +426,7 @@ var _ = SIGDescribe("ResourceQuota", func() { // detached. ReplicationControllers default to "orphan", which // is different from most resources. (Why? To preserve a common // workflow from prior to the GC's introduction.) 
- err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(context.TODO(), replicationController.Name, metav1.DeleteOptions{ + err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(ctx, replicationController.Name, metav1.DeleteOptions{ PropagationPolicy: func() *metav1.DeletionPropagation { p := metav1.DeletePropagationBackground return &p @@ -436,7 +436,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Ensuring resource quota status released usage") usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -449,40 +449,40 @@ var _ = SIGDescribe("ResourceQuota", func() { */ framework.ConformanceIt("should create a ResourceQuota and capture the life of a replica set.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a ReplicaSet") replicaSet := newTestReplicaSetForQuota("test-rs", "nginx", 0) - replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), replicaSet, metav1.CreateOptions{}) + replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, replicaSet, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures replicaset creation") usedResources = v1.ResourceList{} usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting a ReplicaSet") - err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(context.TODO(), replicaSet.Name, metav1.DeleteOptions{}) + err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(ctx, replicaSet.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -496,13 +496,13 @@ var _ = SIGDescribe("ResourceQuota", func() { */ ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim", func(ctx 
context.Context) { ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") @@ -510,29 +510,29 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0") usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a PersistentVolumeClaim") pvc := newTestPersistentVolumeClaimForQuota("test-claim") - pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures persistent volume claim creation") usedResources = v1.ResourceList{} usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1") usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting a PersistentVolumeClaim") - err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(ctx, pvc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0") usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -546,13 +546,13 @@ var _ = SIGDescribe("ResourceQuota", func() { */ ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") @@ -563,13 +563,13 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[core.V1ResourceByStorageClass(classGold, 
v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0") usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a PersistentVolumeClaim with storage class") pvc := newTestPersistentVolumeClaimForQuota("test-claim") pvc.Spec.StorageClassName = &classGold - pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures persistent volume claim creation") @@ -579,11 +579,11 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("1") usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("1Gi") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting a PersistentVolumeClaim") - err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(ctx, pvc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -592,7 +592,7 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0") usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -606,7 +606,7 @@ var _ = SIGDescribe("ResourceQuota", func() { // in order to make sure the resourcequota controller knows this resource, we create one test // resourcequota object, and triggering updates on it until the status is updated. 
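The next hunk creates an object-count quota for the test CRD; the general form is a count/<plural>.<group> entry in spec.hard. A hedged sketch using a made-up group (foos.example.com) and a direct clientset call in place of the e2e createResourceQuota helper:

    // Assumed imports: "context", v1 "k8s.io/api/core/v1",
    // "k8s.io/apimachinery/pkg/api/resource",
    // metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/client-go/kubernetes".
    func createCRDCountQuota(ctx context.Context, c kubernetes.Interface, ns string) error {
        rq := &v1.ResourceQuota{
            ObjectMeta: metav1.ObjectMeta{Name: "quota-for-foos"},
            Spec: v1.ResourceQuotaSpec{
                Hard: v1.ResourceList{
                    // Object-count quota syntax works for any namespaced resource.
                    v1.ResourceName("count/foos.example.com"): resource.MustParse("1"),
                },
            },
        }
        _, err := c.CoreV1().ResourceQuotas(ns).Create(ctx, rq, metav1.CreateOptions{})
        return err
    }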
quotaName := "quota-for-" + testcrd.Crd.Spec.Names.Plural - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, &v1.ResourceQuota{ + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, &v1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: quotaName}, Spec: v1.ResourceQuotaSpec{ Hard: v1.ResourceList{ @@ -615,32 +615,32 @@ var _ = SIGDescribe("ResourceQuota", func() { }, }) framework.ExpectNoError(err) - err = updateResourceQuotaUntilUsageAppears(f.ClientSet, f.Namespace.Name, quotaName, v1.ResourceName(countResourceName)) + err = updateResourceQuotaUntilUsageAppears(ctx, f.ClientSet, f.Namespace.Name, quotaName, v1.ResourceName(countResourceName)) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(context.TODO(), quotaName, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(ctx, quotaName, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Counting existing ResourceQuota") - c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) + c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota") quotaName = "test-quota" resourceQuota := newTestResourceQuota(quotaName) resourceQuota.Spec.Hard[v1.ResourceName(countResourceName)] = resource.MustParse("1") - _, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1)) usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a custom resource") resourceClient := testcrd.DynamicClients["v1"] - testcr, err := instantiateCustomResource(&unstructured.Unstructured{ + testcr, err := instantiateCustomResource(ctx, &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": testcrd.Crd.Spec.Group + "/" + testcrd.Crd.Spec.Versions[0].Name, "kind": testcrd.Crd.Spec.Names.Kind, @@ -654,11 +654,11 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Ensuring resource quota status captures custom resource creation") usedResources = v1.ResourceList{} usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a second custom resource") - _, err = instantiateCustomResource(&unstructured.Unstructured{ + _, err = instantiateCustomResource(ctx, &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": testcrd.Crd.Spec.Group + "/" + testcrd.Crd.Spec.Versions[0].Name, "kind": testcrd.Crd.Spec.Names.Kind, @@ -671,12 +671,12 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectError(err) ginkgo.By("Deleting a custom resource") - err = deleteCustomResource(resourceClient, testcr.GetName()) + err = deleteCustomResource(ctx, resourceClient, testcr.GetName()) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") 
usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources) framework.ExpectNoError(err) }) @@ -692,22 +692,22 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ConformanceIt("should verify ResourceQuota with terminating scopes.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with terminating scope") quotaTerminatingName := "quota-terminating" - resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating)) + resourceQuotaTerminating, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota with not terminating scope") quotaNotTerminatingName := "quota-not-terminating" - resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating)) + resourceQuotaNotTerminating, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a long running pod") @@ -719,7 +719,7 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod := newTestPodForQuota(f, podName, requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage") @@ -728,7 +728,7 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory] usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU] usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory] - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with terminating scope ignored the pod usage") @@ -737,11 +737,11 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") 
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -750,7 +750,7 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a terminating pod") @@ -758,7 +758,7 @@ var _ = SIGDescribe("ResourceQuota", func() { pod = newTestPodForQuota(f, podName, requests, limits) activeDeadlineSeconds := int64(3600) pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage") @@ -767,7 +767,7 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory] usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU] usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory] - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not terminating scope ignored the pod usage") @@ -776,11 +776,11 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -789,7 +789,7 @@ var _ = SIGDescribe("ResourceQuota", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + 
err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) framework.ExpectNoError(err) }) @@ -804,45 +804,45 @@ var _ = SIGDescribe("ResourceQuota", func() { */ framework.ConformanceIt("should verify ResourceQuota with best effort scope.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with best effort scope") - resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort)) + resourceQuotaBestEffort, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota with not best effort scope") - resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort)) + resourceQuotaNotBestEffort, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a best-effort pod") pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not best effort ignored the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = 
waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a not best-effort pod") @@ -853,26 +853,26 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod = newTestPodForQuota(f, "burstable-pod", requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with best effort scope ignored the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) }) @@ -897,11 +897,11 @@ var _ = SIGDescribe("ResourceQuota", func() { resourceQuota.ObjectMeta.Name = quotaName resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("1") resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("500Mi") - _, err := createResourceQuota(client, ns, resourceQuota) + _, err := createResourceQuota(ctx, client, ns, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Getting a ResourceQuota") - resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) + resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("1")) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("500Mi")) @@ -909,23 +909,23 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Updating a ResourceQuota") resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("2") resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("1Gi") - resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota, metav1.UpdateOptions{}) + resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(ctx, resourceQuota, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], 
resource.MustParse("2")) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi")) ginkgo.By("Verifying a ResourceQuota was modified") - resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) + resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("2")) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi")) ginkgo.By("Deleting a ResourceQuota") - err = deleteResourceQuota(client, ns, quotaName) + err = deleteResourceQuota(ctx, client, ns, quotaName) framework.ExpectNoError(err) ginkgo.By("Verifying the deleted ResourceQuota") - _, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) + _, err = client.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.Failf("Expected `not found` error, got: %v", err) } @@ -962,33 +962,33 @@ var _ = SIGDescribe("ResourceQuota", func() { } resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("1") resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("500Mi") - _, err := createResourceQuota(client, ns, resourceQuota) + _, err := createResourceQuota(ctx, client, ns, resourceQuota) framework.ExpectNoError(err) ginkgo.By("Getting a ResourceQuota") - resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), rqName, metav1.GetOptions{}) + resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(ctx, rqName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("1")) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("500Mi")) ginkgo.By("Listing all ResourceQuotas with LabelSelector") - rq, err := client.CoreV1().ResourceQuotas("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + rq, err := client.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "Failed to list job. %v", err) framework.ExpectEqual(len(rq.Items), 1, "Failed to find ResourceQuotes %v", rqName) ginkgo.By("Patching the ResourceQuota") payload := "{\"metadata\":{\"labels\":{\"" + rqName + "\":\"patched\"}},\"spec\":{\"hard\":{ \"memory\":\"750Mi\"}}}" - patchedResourceQuota, err := client.CoreV1().ResourceQuotas(ns).Patch(context.TODO(), rqName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) + patchedResourceQuota, err := client.CoreV1().ResourceQuotas(ns).Patch(ctx, rqName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch ResourceQuota %s in namespace %s", rqName, ns) framework.ExpectEqual(patchedResourceQuota.Labels[rqName], "patched", "Did not find the label for this ResourceQuota. 
Current labels: %v", patchedResourceQuota.Labels) framework.ExpectEqual(*patchedResourceQuota.Spec.Hard.Memory(), resource.MustParse("750Mi"), "Hard memory value for ResourceQuota %q is %s not 750Mi.", patchedResourceQuota.ObjectMeta.Name, patchedResourceQuota.Spec.Hard.Memory().String()) ginkgo.By("Deleting a Collection of ResourceQuotas") - err = client.CoreV1().ResourceQuotas(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: labelSelector}) + err = client.CoreV1().ResourceQuotas(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err) ginkgo.By("Verifying the deleted ResourceQuota") - _, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), rqName, metav1.GetOptions{}) + _, err = client.CoreV1().ResourceQuotas(ns).Get(ctx, rqName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.Failf("Expected `not found` error, got: %v", err) } @@ -1019,11 +1019,11 @@ var _ = SIGDescribe("ResourceQuota", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector - return rqClient.Watch(context.TODO(), options) + return rqClient.Watch(ctx, options) }, } - rqList, err := f.ClientSet.CoreV1().ResourceQuotas("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + rqList, err := f.ClientSet.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Services") ginkgo.By(fmt.Sprintf("Creating resourceQuota %q", rqName)) @@ -1039,10 +1039,10 @@ var _ = SIGDescribe("ResourceQuota", func() { }, }, } - _, err = createResourceQuota(f.ClientSet, ns, resourceQuota) + _, err = createResourceQuota(ctx, f.ClientSet, ns, resourceQuota) framework.ExpectNoError(err) - initialResourceQuota, err := rqClient.Get(context.TODO(), rqName, metav1.GetOptions{}) + initialResourceQuota, err := rqClient.Get(ctx, rqName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(*initialResourceQuota.Spec.Hard.Cpu(), resource.MustParse("500m"), "Hard cpu value for ResourceQuota %q is %s not 500m.", initialResourceQuota.Name, initialResourceQuota.Spec.Hard.Cpu().String()) framework.Logf("Resource quota %q reports spec: hard cpu limit of %s", rqName, initialResourceQuota.Spec.Hard.Cpu()) @@ -1054,21 +1054,21 @@ var _ = SIGDescribe("ResourceQuota", func() { hardLimits := quota.Add(v1.ResourceList{}, initialResourceQuota.Spec.Hard) err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - updateStatus, err := rqClient.Get(context.TODO(), rqName, metav1.GetOptions{}) + updateStatus, err := rqClient.Get(ctx, rqName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get ResourceQuota %q", rqName) updateStatus.Status = v1.ResourceQuotaStatus{ Hard: hardLimits, } - updatedResourceQuota, err = rqClient.UpdateStatus(context.TODO(), updateStatus, metav1.UpdateOptions{}) + updatedResourceQuota, err = rqClient.UpdateStatus(ctx, updateStatus, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "Failed to update resourceQuota") ginkgo.By(fmt.Sprintf("Confirm /status for %q resourceQuota via watch", rqName)) - ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort) + ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort) defer cancel() - _, err = watchtools.Until(ctx, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = 
watchtools.Until(ctxUntil, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) { if rq, ok := event.Object.(*v1.ResourceQuota); ok { found := rq.Name == updatedResourceQuota.Name && rq.Namespace == ns && @@ -1087,11 +1087,11 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.Logf("ResourceQuota %q /status was updated", updatedResourceQuota.Name) // Sync resourceQuota list before patching /status - rqList, err = f.ClientSet.CoreV1().ResourceQuotas("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + rqList, err = f.ClientSet.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Services") ginkgo.By("Patching hard spec values for cpu & memory") - xResourceQuota, err := rqClient.Patch(context.TODO(), updatedResourceQuota.Name, types.StrategicMergePatchType, + xResourceQuota, err := rqClient.Patch(ctx, updatedResourceQuota.Name, types.StrategicMergePatchType, []byte(`{"spec":{"hard":{"cpu":"1","memory":"1Gi"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err, "Could not patch resourcequota %q. Error: %v", xResourceQuota.Name, err) @@ -1103,16 +1103,16 @@ var _ = SIGDescribe("ResourceQuota", func() { rqStatusJSON, err := json.Marshal(hardLimits) framework.ExpectNoError(err) - patchedResourceQuota, err := rqClient.Patch(context.TODO(), rqName, types.StrategicMergePatchType, + patchedResourceQuota, err := rqClient.Patch(ctx, rqName, types.StrategicMergePatchType, []byte(`{"status": {"hard": `+string(rqStatusJSON)+`}}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Confirm /status for %q resourceQuota via watch", rqName)) - ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStartShort) + ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStartShort) defer cancel() - _, err = watchtools.Until(ctx, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) { if rq, ok := event.Object.(*v1.ResourceQuota); ok { found := rq.Name == patchedResourceQuota.Name && rq.Namespace == ns && @@ -1132,7 +1132,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By(fmt.Sprintf("Get %q /status", rqName)) rqResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "resourcequotas"} - unstruct, err := f.DynamicClient.Resource(rqResource).Namespace(ns).Get(context.TODO(), resourceQuota.Name, metav1.GetOptions{}, "status") + unstruct, err := f.DynamicClient.Resource(rqResource).Namespace(ns).Get(ctx, resourceQuota.Name, metav1.GetOptions{}, "status") framework.ExpectNoError(err) rq, err := unstructuredToResourceQuota(unstruct) @@ -1144,7 +1144,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.Logf("Resourcequota %q reports status: hard memory of %s", rqName, rq.Status.Hard.Memory()) // Sync resourceQuota list before repatching /status - rqList, err = f.ClientSet.CoreV1().ResourceQuotas("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + rqList, err = f.ClientSet.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Services") ginkgo.By(fmt.Sprintf("Repatching %q /status before checking Spec is unchanged", rqName)) @@ -1155,7 +1155,7 @@ var _ = SIGDescribe("ResourceQuota", func() { rqStatusJSON, err = json.Marshal(newHardLimits) framework.ExpectNoError(err) - repatchedResourceQuota, 
err := rqClient.Patch(context.TODO(), rqName, types.StrategicMergePatchType, + repatchedResourceQuota, err := rqClient.Patch(ctx, rqName, types.StrategicMergePatchType, []byte(`{"status": {"hard": `+string(rqStatusJSON)+`}}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) @@ -1165,7 +1165,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectEqual(*repatchedResourceQuota.Status.Hard.Memory(), resource.MustParse("2Gi"), "Hard memory value for ResourceQuota %q is %s not 2Gi.", repatchedResourceQuota.Name, repatchedResourceQuota.Status.Hard.Memory().String()) framework.Logf("Resourcequota %q reports status: hard memory of %s", repatchedResourceQuota.Name, repatchedResourceQuota.Status.Hard.Memory()) - _, err = watchtools.Until(ctx, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) { if rq, ok := event.Object.(*v1.ResourceQuota); ok { found := rq.Name == patchedResourceQuota.Name && rq.Namespace == ns && @@ -1183,8 +1183,8 @@ var _ = SIGDescribe("ResourceQuota", func() { }) framework.ExpectNoError(err, "failed to locate ResourceQuota %q in namespace %q", patchedResourceQuota.Name, ns) - err = wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) { - resourceQuotaResult, err := rqClient.Get(context.TODO(), rqName, metav1.GetOptions{}) + err = wait.PollImmediateWithContext(ctx, 5*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) { + resourceQuotaResult, err := rqClient.Get(ctx, rqName, metav1.GetOptions{}) framework.ExpectNoError(err) if apiequality.Semantic.DeepEqual(resourceQuotaResult.Spec.Hard.Cpu(), resourceQuotaResult.Status.Hard.Cpu()) { @@ -1206,45 +1206,45 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with best effort scope") - resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort)) + resourceQuotaBestEffort, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota with not best effort scope") - resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort)) + resourceQuotaNotBestEffort, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, 
f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a best-effort pod") pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not best effort ignored the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a not best-effort pod") @@ -1255,47 +1255,47 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod = newTestPodForQuota(f, "burstable-pod", requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with best effort scope ignored the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") 
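Pods in these specs are removed with a zero grace period (*metav1.NewDeleteOptions(0)) so that quota usage is released promptly; with the spec context the call looks roughly like this (helper name is illustrative):

    // Assumed imports: "context",
    // metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/client-go/kubernetes".
    func deletePodNow(ctx context.Context, c kubernetes.Interface, ns, name string) error {
        // NewDeleteOptions(0) sets GracePeriodSeconds to 0, i.e. delete immediately.
        return c.CoreV1().Pods(ns).Delete(ctx, name, *metav1.NewDeleteOptions(0))
    }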
usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota with terminating scopes through scope selectors.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with terminating scope") quotaTerminatingName := "quota-terminating" - resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating)) + resourceQuotaTerminating, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a ResourceQuota with not terminating scope") quotaNotTerminatingName := "quota-not-terminating" - resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating)) + resourceQuotaNotTerminating, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a long running pod") @@ -1307,7 +1307,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod := newTestPodForQuota(f, podName, requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage") @@ -1316,7 +1316,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory] usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU] usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory] - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with terminating scope ignored the pod usage") @@ -1325,11 +1325,11 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { usedResources[v1.ResourceRequestsMemory] = 
resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1338,7 +1338,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a terminating pod") @@ -1346,7 +1346,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { pod = newTestPodForQuota(f, podName, requests, limits) activeDeadlineSeconds := int64(3600) pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage") @@ -1355,7 +1355,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory] usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU] usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory] - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not terminating scope ignored the pod usage") @@ -1364,11 +1364,11 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1377,7 +1377,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0") 
usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) framework.ExpectNoError(err) }) }) @@ -1388,7 +1388,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func(ctx context.Context) { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) } @@ -1397,39 +1397,39 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { hard[v1.ResourcePods] = resource.MustParse("1") ginkgo.By("Creating a ResourceQuota with priority class scope") - resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass1"})) + resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass1"})) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a pod with priority class") podName := "testpod-pclass1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, 
f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func(ctx context.Context) { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) } @@ -1438,45 +1438,45 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { hard[v1.ResourcePods] = resource.MustParse("1") ginkgo.By("Creating a ResourceQuota with priority class scope") - resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass2"})) + resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass2"})) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating first pod with priority class should pass") podName := "testpod-pclass2-1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating 2nd pod with priority class should fail") podName2 := "testpod-pclass2-2" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2") - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod2, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Deleting first pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, 
resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func(ctx context.Context) { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) } @@ -1485,51 +1485,51 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { hard[v1.ResourcePods] = resource.MustParse("1") ginkgo.By("Creating a ResourceQuota with priority class scope") - resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass4"})) + resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass4"})) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a pod with priority class with pclass3") podName := "testpod-pclass3-1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope remains same") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a 2nd pod with priority class pclass3") podName2 := "testpod-pclass2-2" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3") - pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{}) + pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod2, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope remains same") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) 
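// A minimal sketch of the conversion pattern applied in the hunks above: the
// spec callback takes the Ginkgo-provided context.Context and threads it into
// every client call in place of context.TODO(), so an aborted spec cancels
// in-flight API requests. The identifiers below (package sketch, sketchSpec,
// "sketch-pod") are illustrative placeholders, not names from the patch.
package sketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

func sketchSpec(f *framework.Framework) {
	ginkgo.It("threads the Ginkgo context into API calls", func(ctx context.Context) {
		// Create a throwaway pod with the Ginkgo context instead of context.TODO().
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "sketch-pod"},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{Name: "pause", Image: imageutils.GetPauseImageName()}},
			},
		}
		created, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		// Delete it with the same context, so cleanup is also cancellable.
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, created.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)
	})
}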
ginkgo.By("Deleting both pods") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod2.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func(ctx context.Context) { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) } - _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}, metav1.CreateOptions{}) + _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) } @@ -1538,52 +1538,52 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { hard[v1.ResourcePods] = resource.MustParse("2") ginkgo.By("Creating a ResourceQuota with priority class scope") - resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass5", "pclass6"})) + resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass5", "pclass6"})) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a pod with priority class pclass5") podName := "testpod-pclass5" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, 
resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating 2nd pod with priority class pclass6") podName2 := "testpod-pclass6" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6") - pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2, metav1.CreateOptions{}) + pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod2, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope is updated with the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("2") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting both pods") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod2.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func(ctx context.Context) { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) } @@ -1592,34 +1592,34 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { hard[v1.ResourcePods] = resource.MustParse("1") ginkgo.By("Creating a ResourceQuota with priority class scope") - resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpNotIn, []string{"pclass7"})) + resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpNotIn, []string{"pclass7"})) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) 
framework.ExpectNoError(err) ginkgo.By("Creating a pod with priority class pclass7") podName := "testpod-pclass7" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is not used") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func(ctx context.Context) { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) } @@ -1628,39 +1628,39 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { hard[v1.ResourcePods] = resource.MustParse("1") ginkgo.By("Creating a ResourceQuota with priority class scope") - resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpExists, []string{})) + resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpExists, []string{})) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") usedResources := v1.ResourceList{} usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a pod with priority class pclass8") podName := "testpod-pclass8" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = 
waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") usedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func(ctx context.Context) { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) } @@ -1673,7 +1673,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { hard[v1.ResourceLimitsMemory] = resource.MustParse("3Gi") ginkgo.By("Creating a ResourceQuota with priority class scope") - resourceQuotaPriorityClass, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass9"})) + resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass9"})) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") @@ -1683,7 +1683,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Creating a pod with priority class") @@ -1696,7 +1696,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { limit[v1.ResourceMemory] = resource.MustParse("2Gi") pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") @@ -1705,11 +1705,11 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("1Gi") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("2") 
usedResources[v1.ResourceLimitsMemory] = resource.MustParse("2Gi") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1718,7 +1718,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources) framework.ExpectNoError(err) }) @@ -1730,12 +1730,12 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.It("should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with cross namespace pod affinity scope") quota, err := createResourceQuota( - f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-cross-namespace-pod-affinity", v1.ResourceQuotaScopeCrossNamespacePodAffinity)) + ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-cross-namespace-pod-affinity", v1.ResourceQuotaScopeCrossNamespacePodAffinity)) framework.ExpectNoError(err) ginkgo.By("Ensuring ResourceQuota status is calculated") wantUsedResources := v1.ResourceList{v1.ResourcePods: resource.MustParse("0")} - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources) framework.ExpectNoError(err) ginkgo.By("Creating a pod that does not use cross namespace affinity") @@ -1744,7 +1744,7 @@ var _ = SIGDescribe("ResourceQuota", func() { RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{ TopologyKey: "region", }}}}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating a pod that uses namespaces field") @@ -1754,12 +1754,12 @@ var _ = SIGDescribe("ResourceQuota", func() { TopologyKey: "region", Namespaces: []string{"ns1"}, }}}}) - podWithNamespaces, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podWithNamespaces, metav1.CreateOptions{}) + podWithNamespaces, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podWithNamespaces, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota captures podWithNamespaces usage") wantUsedResources[v1.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources) framework.ExpectNoError(err) ginkgo.By("Creating a pod that uses 
namespaceSelector field") @@ -1776,25 +1776,25 @@ var _ = SIGDescribe("ResourceQuota", func() { }, }, }}}}}) - podWithNamespaceSelector, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podWithNamespaceSelector, metav1.CreateOptions{}) + podWithNamespaceSelector, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podWithNamespaceSelector, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota captures podWithNamespaceSelector usage") wantUsedResources[v1.ResourcePods] = resource.MustParse("2") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources) framework.ExpectNoError(err) ginkgo.By("Deleting the pods") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podWithNamespaces.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podWithNamespaces.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podWithNamespaceSelector.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podWithNamespaceSelector.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") wantUsedResources[v1.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources) + err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources) framework.ExpectNoError(err) }) }) @@ -2089,22 +2089,22 @@ func newTestSecretForQuota(name string) *v1.Secret { } // createResourceQuota in the specified namespace -func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { - return c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), resourceQuota, metav1.CreateOptions{}) +func createResourceQuota(ctx context.Context, c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { + return c.CoreV1().ResourceQuotas(namespace).Create(ctx, resourceQuota, metav1.CreateOptions{}) } // deleteResourceQuota with the specified name -func deleteResourceQuota(c clientset.Interface, namespace, name string) error { - return c.CoreV1().ResourceQuotas(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) +func deleteResourceQuota(ctx context.Context, c clientset.Interface, namespace, name string) error { + return c.CoreV1().ResourceQuotas(namespace).Delete(ctx, name, metav1.DeleteOptions{}) } // countResourceQuota counts the number of ResourceQuota in the specified namespace // On contended servers the service account controller can slow down, leading to the count changing during a run. // Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely. 
-func countResourceQuota(c clientset.Interface, namespace string) (int, error) { +func countResourceQuota(ctx context.Context, c clientset.Interface, namespace string) (int, error) { found, unchanged := 0, 0 - return found, wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(context.TODO(), metav1.ListOptions{}) + return found, wait.PollWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { + resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) if len(resourceQuotas.Items) == found { // loop until the number of resource quotas has stabilized for 5 seconds @@ -2118,9 +2118,9 @@ func countResourceQuota(c clientset.Interface, namespace string) (int, error) { } // wait for resource quota status to show the expected used resources value -func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error { - return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) { - resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) +func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaName string, used v1.ResourceList) error { + return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) { + resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{}) if err != nil { return false, err } @@ -2141,9 +2141,9 @@ func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.R // updateResourceQuotaUntilUsageAppears updates the resource quota object until the usage is populated // for the specific resource name. -func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error { - return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) { - resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) +func updateResourceQuotaUntilUsageAppears(ctx context.Context, c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error { + return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) { + resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{}) if err != nil { return false, err } @@ -2156,7 +2156,7 @@ func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName s current := resourceQuota.Spec.Hard[resourceName] current.Add(resource.MustParse("1")) resourceQuota.Spec.Hard[resourceName] = current - _, err = c.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota, metav1.UpdateOptions{}) + _, err = c.CoreV1().ResourceQuotas(ns).Update(ctx, resourceQuota, metav1.UpdateOptions{}) // ignoring conflicts since someone else may already updated it. 
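// A minimal sketch of the polling conversion shown above: wait.Poll becomes
// wait.PollWithContext so the loop exits as soon as the Ginkgo context is
// cancelled. The helper name below is an illustrative placeholder and assumes
// the same imports as resource_quota.go; framework.Poll and
// resourceQuotaTimeout are the constants this file already uses.
func waitForQuotaStatusSketch(ctx context.Context, c clientset.Interface, ns, name string) error {
	return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
		quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Done once the server has populated any used values in the status.
		return len(quota.Status.Used) > 0, nil
	})
}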
if apierrors.IsConflict(err) { return false, nil diff --git a/test/e2e/apimachinery/storage_version.go b/test/e2e/apimachinery/storage_version.go index 57ba66fd4ef..ac5343045e1 100644 --- a/test/e2e/apimachinery/storage_version.go +++ b/test/e2e/apimachinery/storage_version.go @@ -47,7 +47,7 @@ var _ = SIGDescribe("StorageVersion resources [Feature:StorageVersionAPI]", func GenerateName: svName, }, } - createdSV, err := client.InternalV1alpha1().StorageVersions().Create(context.TODO(), sv, metav1.CreateOptions{}) + createdSV, err := client.InternalV1alpha1().StorageVersions().Create(ctx, sv, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating storage version") // update the created sv with server storage version @@ -63,14 +63,14 @@ var _ = SIGDescribe("StorageVersion resources [Feature:StorageVersionAPI]", func CommonEncodingVersion: &version, } _, err = client.InternalV1alpha1().StorageVersions().UpdateStatus( - context.TODO(), createdSV, metav1.UpdateOptions{}) + ctx, createdSV, metav1.UpdateOptions{}) framework.ExpectNoError(err, "updating storage version") // wait for sv to be GC'ed framework.Logf("Waiting for storage version %v to be garbage collected", createdSV.Name) err = wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { _, err := client.InternalV1alpha1().StorageVersions().Get( - context.TODO(), createdSV.Name, metav1.GetOptions{}) + ctx, createdSV.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil } diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index 53c2c46dce4..21cace2814b 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -57,11 +57,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { podName := "pod-1" framework.Logf("Creating pod %s", podName) - _, err := c.CoreV1().Pods(ns).Create(context.TODO(), newTablePod(ns, podName), metav1.CreateOptions{}) + _, err := c.CoreV1().Pods(ns).Create(ctx, newTablePod(ns, podName), metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns) table := &metav1beta1.Table{} - err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do(context.TODO()).Into(table) + err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do(ctx).Into(table) framework.ExpectNoError(err, "failed to get pod %s in Table form in namespace: %s", podName, ns) framework.Logf("Table: %#v", table) @@ -83,9 +83,9 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { client := c.CoreV1().PodTemplates(ns) ginkgo.By("creating a large number of resources") - workqueue.ParallelizeUntil(context.TODO(), 5, 20, func(i int) { + workqueue.ParallelizeUntil(ctx, 5, 20, func(i int) { for tries := 3; tries >= 0; tries-- { - _, err := client.Create(context.TODO(), &v1.PodTemplate{ + _, err := client.Create(ctx, &v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("template-%04d", i), }, @@ -109,7 +109,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { err := c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates"). VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec). SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io"). 
- Do(context.TODO()).Into(pagedTable) + Do(ctx).Into(pagedTable) framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns) framework.ExpectEqual(len(pagedTable.Rows), 2) framework.ExpectNotEqual(pagedTable.ResourceVersion, "") @@ -120,7 +120,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { err = c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates"). VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec). SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io"). - Do(context.TODO()).Into(pagedTable) + Do(ctx).Into(pagedTable) framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns) gomega.Expect(len(pagedTable.Rows)).To(gomega.BeNumerically(">", 0)) framework.ExpectEqual(pagedTable.Rows[0].Cells[0], "template-0002") @@ -130,7 +130,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { c := f.ClientSet table := &metav1beta1.Table{} - err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do(context.TODO()).Into(table) + err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do(ctx).Into(table) framework.ExpectNoError(err, "failed to get nodes in Table form across all namespaces") framework.Logf("Table: %#v", table) @@ -163,7 +163,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { }, }, } - err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io").Body(sar).Do(context.TODO()).Into(table) + err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io").Body(sar).Do(ctx).Into(table) framework.ExpectError(err, "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar) framework.ExpectEqual(err.(apierrors.APIStatus).Status().Code, int32(406)) }) diff --git a/test/e2e/apimachinery/watch.go b/test/e2e/apimachinery/watch.go index 15ba8fc9841..9559eb809d7 100644 --- a/test/e2e/apimachinery/watch.go +++ b/test/e2e/apimachinery/watch.go @@ -62,15 +62,15 @@ var _ = SIGDescribe("Watchers", func() { ns := f.Namespace.Name ginkgo.By("creating a watch on configmaps with label A") - watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA) + watchA, err := watchConfigMaps(ctx, f, "", multipleWatchersLabelValueA) framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA) ginkgo.By("creating a watch on configmaps with label B") - watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB) + watchB, err := watchConfigMaps(ctx, f, "", multipleWatchersLabelValueB) framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB) ginkgo.By("creating a watch on configmaps with label A or B") - watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB) + watchAB, err := watchConfigMaps(ctx, f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB) framework.ExpectNoError(err, "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB) testConfigMapA := &v1.ConfigMap{ @@ -91,13 
+91,13 @@ var _ = SIGDescribe("Watchers", func() { } ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification") - testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapA, metav1.CreateOptions{}) + testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMapA, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns) expectEvent(watchA, watch.Added, testConfigMapA) expectEvent(watchAB, watch.Added, testConfigMapA) ginkgo.By("modifying configmap A and ensuring the correct watchers observe the notification") - testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) { + testConfigMapA, err = updateConfigMap(ctx, c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "1") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns) @@ -105,7 +105,7 @@ var _ = SIGDescribe("Watchers", func() { expectEvent(watchAB, watch.Modified, testConfigMapA) ginkgo.By("modifying configmap A again and ensuring the correct watchers observe the notification") - testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) { + testConfigMapA, err = updateConfigMap(ctx, c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "2") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns) @@ -113,20 +113,20 @@ var _ = SIGDescribe("Watchers", func() { expectEvent(watchAB, watch.Modified, testConfigMapA) ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapA.GetName(), metav1.DeleteOptions{}) + err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMapA.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns) expectEvent(watchA, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil) ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification") - testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapB, metav1.CreateOptions{}) + testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMapB, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns) expectEvent(watchB, watch.Added, testConfigMapB) expectEvent(watchAB, watch.Added, testConfigMapB) expectNoEvent(watchA, watch.Added, testConfigMapB) ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapB.GetName(), metav1.DeleteOptions{}) + err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMapB.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns) expectEvent(watchB, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil) @@ -153,27 +153,27 @@ var _ = SIGDescribe("Watchers", func() { } ginkgo.By("creating a new configmap") - testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) + testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(ctx, 
testConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("modifying the configmap once") - testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + testConfigMapFirstUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "1") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("modifying the configmap a second time") - testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + testConfigMapSecondUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "2") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) + err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMap.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("creating a watch on configmaps from the resource version returned by the first update") - testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue) + testWatch, err := watchConfigMaps(ctx, f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue) framework.ExpectNoError(err, "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion) ginkgo.By("Expecting to observe notifications for all changes to the configmap after the first update") @@ -203,15 +203,15 @@ var _ = SIGDescribe("Watchers", func() { } ginkgo.By("creating a watch on configmaps") - testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue) + testWatchBroken, err := watchConfigMaps(ctx, f, "", watchRestartedLabelValue) framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue) ginkgo.By("creating a new configmap") - testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) + testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) ginkgo.By("modifying the configmap once") - _, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + _, err = updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "1") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns) @@ -225,7 +225,7 @@ var _ = SIGDescribe("Watchers", func() { testWatchBroken.Stop() ginkgo.By("modifying the configmap a second time, while the watch is closed") - testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + testConfigMapSecondUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "2") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s 
a second time", configMapName, ns) @@ -235,11 +235,11 @@ var _ = SIGDescribe("Watchers", func() { if !ok { framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent) } - testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue) + testWatchRestarted, err := watchConfigMaps(ctx, f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue) framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) + err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMap.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed") @@ -269,21 +269,21 @@ var _ = SIGDescribe("Watchers", func() { } ginkgo.By("creating a watch on configmaps with a certain label") - testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue) + testWatch, err := watchConfigMaps(ctx, f, "", toBeChangedLabelValue) framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue) ginkgo.By("creating a new configmap") - testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) + testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) ginkgo.By("modifying the configmap once") - testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + testConfigMapFirstUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "1") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns) ginkgo.By("changing the label value of the configmap") - _, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + _, err = updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value" }) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value", configMapName, ns) @@ -294,7 +294,7 @@ var _ = SIGDescribe("Watchers", func() { expectEvent(testWatch, watch.Deleted, nil) ginkgo.By("modifying the configmap a second time") - testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + testConfigMapSecondUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "2") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns) @@ -303,19 +303,19 @@ var _ = SIGDescribe("Watchers", func() { expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate) ginkgo.By("changing the label value of the configmap back") - testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + testConfigMapLabelRestored, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm 
*v1.ConfigMap) { cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue }) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns) ginkgo.By("modifying the configmap a third time") - testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { + testConfigMapThirdUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { setConfigMapData(cm, "mutation", "3") }) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) + err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMap.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored") @@ -338,7 +338,7 @@ var _ = SIGDescribe("Watchers", func() { iterations := 100 ginkgo.By("getting a starting resourceVersion") - configmaps, err := c.CoreV1().ConfigMaps(ns).List(context.TODO(), metav1.ListOptions{}) + configmaps, err := c.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list configmaps in the namespace %s", ns) resourceVersion := configmaps.ResourceVersion @@ -348,12 +348,12 @@ var _ = SIGDescribe("Watchers", func() { go func() { defer ginkgo.GinkgoRecover() defer close(donec) - produceConfigMapEvents(f, stopc, 5*time.Millisecond) + produceConfigMapEvents(ctx, f, stopc, 5*time.Millisecond) }() listWatcher := &cachetools.ListWatch{ WatchFunc: func(listOptions metav1.ListOptions) (watch.Interface, error) { - return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), listOptions) + return c.CoreV1().ConfigMaps(ns).Watch(ctx, listOptions) }, } @@ -379,7 +379,7 @@ var _ = SIGDescribe("Watchers", func() { }) }) -func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) { +func watchConfigMaps(ctx context.Context, f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) { c := f.ClientSet ns := f.Namespace.Name opts := metav1.ListOptions{ @@ -394,7 +394,7 @@ func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...s }, }), } - return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), opts) + return c.CoreV1().ConfigMaps(ns).Watch(ctx, opts) } func int64ptr(i int) *int64 { @@ -467,7 +467,7 @@ const ( deleteEvent ) -func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWaitBetweenEvents time.Duration) { +func produceConfigMapEvents(ctx context.Context, f *framework.Framework, stopc <-chan struct{}, minWaitBetweenEvents time.Duration) { c := f.ClientSet ns := f.Namespace.Name @@ -493,7 +493,7 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa Name: name(i), }, } - _, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err := c.CoreV1().ConfigMaps(ns).Create(ctx, cm, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns) existing = append(existing, i) i++ @@ -507,12 +507,12 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa }, }, } - _, err := 
c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{}) + _, err := c.CoreV1().ConfigMaps(ns).Update(ctx, cm, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns) updates++ case deleteEvent: idx := rand.Intn(len(existing)) - err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name(existing[idx]), metav1.DeleteOptions{}) + err := c.CoreV1().ConfigMaps(ns).Delete(ctx, name(existing[idx]), metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns) existing = append(existing[:idx], existing[idx+1:]...) default: diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index d397d057040..68e54913e3e 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -87,23 +87,23 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { var client clientset.Interface var namespaceName string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { client = f.ClientSet namespaceName = f.Namespace.Name // Make sure the namespace created for the test is labeled to be selected by the webhooks - labelNamespace(f, f.Namespace.Name) - createWebhookConfigurationReadyNamespace(f) + labelNamespace(ctx, f, f.Namespace.Name) + createWebhookConfigurationReadyNamespace(ctx, f) ginkgo.By("Setting up server cert") certCtx = setupServerCert(namespaceName, serviceName) - createAuthReaderRoleBinding(f, namespaceName) + createAuthReaderRoleBinding(ctx, f, namespaceName) - deployWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort) + deployWebhookAndService(ctx, f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort) }) - ginkgo.AfterEach(func() { - cleanWebhookTest(client, namespaceName) + ginkgo.AfterEach(func(ctx context.Context) { + cleanWebhookTest(ctx, client, namespaceName) }) /* @@ -118,7 +118,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { { ginkgo.By("fetching the /apis discovery document") apiGroupList := &metav1.APIGroupList{} - err := client.Discovery().RESTClient().Get().AbsPath("/apis").Do(context.TODO()).Into(apiGroupList) + err := client.Discovery().RESTClient().Get().AbsPath("/apis").Do(ctx).Into(apiGroupList) framework.ExpectNoError(err, "fetching /apis") ginkgo.By("finding the admissionregistration.k8s.io API group in the /apis discovery document") @@ -145,7 +145,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { { ginkgo.By("fetching the /apis/admissionregistration.k8s.io discovery document") group := &metav1.APIGroup{} - err := client.Discovery().RESTClient().Get().AbsPath("/apis/admissionregistration.k8s.io").Do(context.TODO()).Into(group) + err := client.Discovery().RESTClient().Get().AbsPath("/apis/admissionregistration.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err, "fetching /apis/admissionregistration.k8s.io") framework.ExpectEqual(group.Name, admissionregistrationv1.GroupName, "verifying API group name in /apis/admissionregistration.k8s.io discovery document") @@ -163,7 +163,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { { ginkgo.By("fetching the /apis/admissionregistration.k8s.io/v1 discovery document") apiResourceList := &metav1.APIResourceList{} - err := 
client.Discovery().RESTClient().Get().AbsPath("/apis/admissionregistration.k8s.io/v1").Do(context.TODO()).Into(apiResourceList) + err := client.Discovery().RESTClient().Get().AbsPath("/apis/admissionregistration.k8s.io/v1").Do(ctx).Into(apiResourceList) framework.ExpectNoError(err, "fetching /apis/admissionregistration.k8s.io/v1") framework.ExpectEqual(apiResourceList.GroupVersion, admissionregistrationv1.SchemeGroupVersion.String(), "verifying API group/version in /apis/admissionregistration.k8s.io/v1 discovery document") @@ -195,8 +195,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { namespace based on the webhook namespace selector MUST be allowed. */ framework.ConformanceIt("should be able to deny pod and configmap creation", func(ctx context.Context) { - registerWebhook(f, f.UniqueName, certCtx, servicePort) - testWebhook(f) + registerWebhook(ctx, f, f.UniqueName, certCtx, servicePort) + testWebhook(ctx, f) }) /* @@ -206,8 +206,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { Attempts to attach MUST be denied. */ framework.ConformanceIt("should be able to deny attaching pod", func(ctx context.Context) { - registerWebhookForAttachingPod(f, f.UniqueName, certCtx, servicePort) - testAttachingPodWebhook(f) + registerWebhookForAttachingPod(ctx, f, f.UniqueName, certCtx, servicePort) + testAttachingPodWebhook(ctx, f) }) /* @@ -222,9 +222,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { return } ginkgo.DeferCleanup(testcrd.CleanUp) - registerWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort) - testCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"]) - testBlockingCustomResourceUpdateDeletion(f, testcrd.Crd, testcrd.DynamicClients["v1"]) + registerWebhookForCustomResource(ctx, f, f.UniqueName, certCtx, testcrd, servicePort) + testCustomResourceWebhook(ctx, f, testcrd.Crd, testcrd.DynamicClients["v1"]) + testBlockingCustomResourceUpdateDeletion(ctx, f, testcrd.Crd, testcrd.DynamicClients["v1"]) }) /* @@ -234,8 +234,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { Attempt operations that require the admission webhook; all MUST be denied. */ framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func(ctx context.Context) { - registerFailClosedWebhook(f, f.UniqueName, certCtx, servicePort) - testFailClosedWebhook(f) + registerFailClosedWebhook(ctx, f, f.UniqueName, certCtx, servicePort) + testFailClosedWebhook(ctx, f) }) /* @@ -246,8 +246,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { the first webhook is present. Attempt to create a config map; both keys MUST be added to the config map. */ framework.ConformanceIt("should mutate configmap", func(ctx context.Context) { - registerMutatingWebhookForConfigMap(f, f.UniqueName, certCtx, servicePort) - testMutatingConfigMapWebhook(f) + registerMutatingWebhookForConfigMap(ctx, f, f.UniqueName, certCtx, servicePort) + testMutatingConfigMapWebhook(ctx, f) }) /* @@ -257,8 +257,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { the InitContainer MUST be added the TerminationMessagePolicy MUST be defaulted. 
*/ framework.ConformanceIt("should mutate pod and apply defaults after mutation", func(ctx context.Context) { - registerMutatingWebhookForPod(f, f.UniqueName, certCtx, servicePort) - testMutatingPodWebhook(f) + registerMutatingWebhookForPod(ctx, f, f.UniqueName, certCtx, servicePort) + testMutatingPodWebhook(ctx, f) }) /* @@ -269,9 +269,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { MUST NOT be mutated the webhooks. */ framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func(ctx context.Context) { - registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort) - registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort) - testWebhooksForWebhookConfigurations(f, f.UniqueName, certCtx, servicePort) + registerValidatingWebhookForWebhookConfigurations(ctx, f, f.UniqueName+"blocking", certCtx, servicePort) + registerMutatingWebhookForWebhookConfigurations(ctx, f, f.UniqueName+"blocking", certCtx, servicePort) + testWebhooksForWebhookConfigurations(ctx, f, f.UniqueName, certCtx, servicePort) }) /* @@ -286,8 +286,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { return } ginkgo.DeferCleanup(testcrd.CleanUp) - registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort) - testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], false) + registerMutatingWebhookForCustomResource(ctx, f, f.UniqueName, certCtx, testcrd, servicePort) + testMutatingCustomResourceWebhook(ctx, f, testcrd.Crd, testcrd.DynamicClients["v1"], false) }) /* @@ -297,9 +297,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { custom resource definition; the create request MUST be denied. 
*/ framework.ConformanceIt("should deny crd creation", func(ctx context.Context) { - registerValidatingWebhookForCRD(f, f.UniqueName, certCtx, servicePort) + registerValidatingWebhookForCRD(ctx, f, f.UniqueName, certCtx, servicePort) - testCRDDenyWebhook(f) + testCRDDenyWebhook(ctx, f) }) /* @@ -316,8 +316,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { return } ginkgo.DeferCleanup(testcrd.CleanUp) - registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort) - testMultiVersionCustomResourceWebhook(f, testcrd) + registerMutatingWebhookForCustomResource(ctx, f, f.UniqueName, certCtx, testcrd, servicePort) + testMultiVersionCustomResourceWebhook(ctx, f, testcrd) }) /* @@ -353,8 +353,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { return } ginkgo.DeferCleanup(testcrd.CleanUp) - registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort) - testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], prune) + registerMutatingWebhookForCustomResource(ctx, f, f.UniqueName, certCtx, testcrd, servicePort) + testMutatingCustomResourceWebhook(ctx, f, testcrd.Crd, testcrd.DynamicClients["v1"], prune) }) /* @@ -371,24 +371,24 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { policyIgnore := admissionregistrationv1.Ignore ginkgo.By("Setting timeout (1s) shorter than webhook latency (5s)") - slowWebhookCleanup := registerSlowWebhook(f, f.UniqueName, certCtx, &policyFail, pointer.Int32Ptr(1), servicePort) - testSlowWebhookTimeoutFailEarly(f) - slowWebhookCleanup() + slowWebhookCleanup := registerSlowWebhook(ctx, f, f.UniqueName, certCtx, &policyFail, pointer.Int32Ptr(1), servicePort) + testSlowWebhookTimeoutFailEarly(ctx, f) + slowWebhookCleanup(ctx) ginkgo.By("Having no error when timeout is shorter than webhook latency and failure policy is ignore") - slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, certCtx, &policyIgnore, pointer.Int32Ptr(1), servicePort) - testSlowWebhookTimeoutNoError(f) - slowWebhookCleanup() + slowWebhookCleanup = registerSlowWebhook(ctx, f, f.UniqueName, certCtx, &policyIgnore, pointer.Int32Ptr(1), servicePort) + testSlowWebhookTimeoutNoError(ctx, f) + slowWebhookCleanup(ctx) ginkgo.By("Having no error when timeout is longer than webhook latency") - slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, certCtx, &policyFail, pointer.Int32Ptr(10), servicePort) - testSlowWebhookTimeoutNoError(f) - slowWebhookCleanup() + slowWebhookCleanup = registerSlowWebhook(ctx, f, f.UniqueName, certCtx, &policyFail, pointer.Int32Ptr(10), servicePort) + testSlowWebhookTimeoutNoError(ctx, f) + slowWebhookCleanup(ctx) ginkgo.By("Having no error when timeout is empty (defaulted to 10s in v1)") - slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, certCtx, &policyFail, nil, servicePort) - testSlowWebhookTimeoutNoError(f) - slowWebhookCleanup() + slowWebhookCleanup = registerSlowWebhook(ctx, f, f.UniqueName, certCtx, &policyFail, nil, servicePort) + testSlowWebhookTimeoutNoError(ctx, f) + slowWebhookCleanup(ctx) }) /* @@ -403,7 +403,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { admissionClient := client.AdmissionregistrationV1() ginkgo.By("Creating a validating webhook configuration") - hook, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + hook, err := createValidatingWebhookConfiguration(ctx, f, 
&admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: f.UniqueName, }, @@ -414,20 +414,20 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { }) framework.ExpectNoError(err, "Creating validating webhook configuration") defer func() { - err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), hook.Name, metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, hook.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting validating webhook configuration") }() // ensure backend is ready before proceeding - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -439,10 +439,10 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Updating a validating webhook configuration's rules to not include the create operation") err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - h, err := admissionClient.ValidatingWebhookConfigurations().Get(context.TODO(), f.UniqueName, metav1.GetOptions{}) + h, err := admissionClient.ValidatingWebhookConfigurations().Get(ctx, f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err, "Getting validating webhook configuration") h.Webhooks[0].Rules[0].Operations = []admissionregistrationv1.OperationType{admissionregistrationv1.Update} - _, err = admissionClient.ValidatingWebhookConfigurations().Update(context.TODO(), h, metav1.UpdateOptions{}) + _, err = admissionClient.ValidatingWebhookConfigurations().Update(ctx, h, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "Updating validating webhook configuration") @@ -450,21 +450,21 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err != nil { if !strings.Contains(err.Error(), "denied") { return false, err } return false, nil } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return true, nil }) framework.ExpectNoError(err, "Waiting for configMap in 
namespace %s to be allowed creation since webhook was updated to not validate create", f.Namespace.Name) ginkgo.By("Patching a validating webhook configuration's rules to include the create operation") - hook, err = admissionClient.ValidatingWebhookConfigurations().Patch(context.TODO(), f.UniqueName, + hook, err = admissionClient.ValidatingWebhookConfigurations().Patch(ctx, f.UniqueName, types.JSONPatchType, []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`), metav1.PatchOptions{}) framework.ExpectNoError(err, "Patching validating webhook configuration") @@ -472,9 +472,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -498,7 +498,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { admissionClient := client.AdmissionregistrationV1() ginkgo.By("Creating a mutating webhook configuration") - hook, err := createMutatingWebhookConfiguration(f, &admissionregistrationv1.MutatingWebhookConfiguration{ + hook, err := createMutatingWebhookConfiguration(ctx, f, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: f.UniqueName, }, @@ -509,29 +509,29 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { }) framework.ExpectNoError(err, "Creating mutating webhook configuration") defer func() { - err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), hook.Name, metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(ctx, hook.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting mutating webhook configuration") }() // ensure backend is ready before proceeding - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") - hook, err = admissionClient.MutatingWebhookConfigurations().Get(context.TODO(), f.UniqueName, metav1.GetOptions{}) + hook, err = admissionClient.MutatingWebhookConfigurations().Get(ctx, f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err, "Getting mutating webhook configuration") ginkgo.By("Updating a mutating webhook configuration's rules to not include the create operation") hook.Webhooks[0].Rules[0].Operations = []admissionregistrationv1.OperationType{admissionregistrationv1.Update} - hook, err = admissionClient.MutatingWebhookConfigurations().Update(context.TODO(), hook, metav1.UpdateOptions{}) + hook, err = admissionClient.MutatingWebhookConfigurations().Update(ctx, hook, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Updating mutating webhook configuration") ginkgo.By("Creating a configMap that should not be mutated") err = wait.PollImmediate(100*time.Millisecond, 
30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return !ok, nil @@ -539,7 +539,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Waiting for configMap in namespace %s this is not mutated", f.Namespace.Name) ginkgo.By("Patching a mutating webhook configuration's rules to include the create operation") - hook, err = admissionClient.MutatingWebhookConfigurations().Patch(context.TODO(), f.UniqueName, + hook, err = admissionClient.MutatingWebhookConfigurations().Patch(ctx, f.UniqueName, types.JSONPatchType, []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`), metav1.PatchOptions{}) framework.ExpectNoError(err, "Patching mutating webhook configuration") @@ -547,11 +547,11 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that should be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return ok, nil @@ -573,7 +573,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { for i := 0; i < testListSize; i++ { name := fmt.Sprintf("%s-%d", f.UniqueName, i) - _, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + _, err := createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{"e2e-list-test-uuid": testUUID}, @@ -588,20 +588,20 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID} ginkgo.By("Listing all of the created validation webhooks") - list, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(context.TODO(), selectorListOpts) + list, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(ctx, selectorListOpts) framework.ExpectNoError(err, "Listing validating webhook configurations") framework.ExpectEqual(len(list.Items), testListSize) // ensure backend is ready before proceeding - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for 
webhook configuration to be ready") ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -613,20 +613,20 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be denied creation by validating webhook", f.Namespace.Name) ginkgo.By("Deleting the collection of validation webhooks") - err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, selectorListOpts) + err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().DeleteCollection(ctx, metav1.DeleteOptions{}, selectorListOpts) framework.ExpectNoError(err, "Deleting collection of validating webhook configurations") ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err != nil { if !strings.Contains(err.Error(), "denied") { return false, err } return false, nil } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return true, nil }) @@ -647,7 +647,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { for i := 0; i < testListSize; i++ { name := fmt.Sprintf("%s-%d", f.UniqueName, i) - _, err := createMutatingWebhookConfiguration(f, &admissionregistrationv1.MutatingWebhookConfiguration{ + _, err := createMutatingWebhookConfiguration(ctx, f, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{"e2e-list-test-uuid": testUUID}, @@ -662,22 +662,22 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID} ginkgo.By("Listing all of the created validation webhooks") - list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.TODO(), selectorListOpts) + list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, selectorListOpts) framework.ExpectNoError(err, "Listing mutating webhook configurations") framework.ExpectEqual(len(list.Items), testListSize) // ensure backend is ready before proceeding - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting 
for webhook configuration to be ready") ginkgo.By("Creating a configMap that should be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return ok, nil @@ -685,17 +685,17 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be mutated", f.Namespace.Name) ginkgo.By("Deleting the collection of validation webhooks") - err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, selectorListOpts) + err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(ctx, metav1.DeleteOptions{}, selectorListOpts) framework.ExpectNoError(err, "Deleting collection of mutating webhook configurations") ginkgo.By("Creating a configMap that should not be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{}) if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return !ok, nil @@ -704,11 +704,11 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { }) }) -func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { +func createAuthReaderRoleBinding(ctx context.Context, f *framework.Framework, namespace string) { ginkgo.By("Create role binding to let webhook read extension-apiserver-authentication") client := f.ClientSet // Create the role binding to allow the webhook read the extension-apiserver-authentication configmap - _, err := client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{ + _, err := client.RbacV1().RoleBindings("kube-system").Create(ctx, &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleBindingName, Annotations: map[string]string{ @@ -736,7 +736,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { } } -func deployWebhookAndService(f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32) { +func deployWebhookAndService(ctx context.Context, f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32) { ginkgo.By("Deploying the webhook pod") client := f.ClientSet @@ -752,7 +752,7 @@ func deployWebhookAndService(f *framework.Framework, image 
string, certCtx *cert }, } namespace := f.Namespace.Name - _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // Create the deployment of the webhook @@ -805,7 +805,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert d.Spec.Template.Spec.Containers = containers d.Spec.Template.Spec.Volumes = volumes - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) + deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) ginkgo.By("Wait for the deployment to be ready") err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) @@ -833,17 +833,17 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert }, }, } - _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) + _, err = client.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating service %s in namespace %s", serviceName, namespace) ginkgo.By("Verifying the service has paired with the endpoint") - err = framework.WaitForServiceEndpointsNum(client, namespace, serviceName, 1, 1*time.Second, 30*time.Second) + err = framework.WaitForServiceEndpointsNum(ctx, client, namespace, serviceName, 1, 1*time.Second, 30*time.Second) framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceName, 1) } func strPtr(s string) *string { return &s } -func registerWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func registerWebhook(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { client := f.ClientSet ginkgo.By("Registering the webhook via the AdmissionRegistration API") @@ -856,7 +856,7 @@ func registerWebhook(f *framework.Framework, configName string, certCtx *certCon MatchLabels: map[string]string{f.UniqueName: "true"}, } - _, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + _, err := createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -873,20 +873,20 @@ func registerWebhook(f *framework.Framework, configName string, certCtx *certCon }) framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func registerWebhookForAttachingPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func registerWebhookForAttachingPod(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { client := f.ClientSet ginkgo.By("Registering the webhook via the AdmissionRegistration API") namespace := f.Namespace.Name 
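
The conversion pattern repeated through these registration helpers is mechanical: the Ginkgo callback accepts a context.Context, every helper gains a leading ctx parameter, and every client-go call receives that ctx instead of context.TODO(). A minimal, self-contained sketch of that shape, assuming a fake clientset for illustration (createExampleConfigMap and the spec names are hypothetical, not part of this patch):

```go
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// createExampleConfigMap is a hypothetical helper written in the style of this
// patch: it takes the caller's ctx as the first parameter and forwards it to
// client-go, so the API call is abandoned as soon as the spec is interrupted.
func createExampleConfigMap(ctx context.Context, c kubernetes.Interface, ns string) (*v1.ConfigMap, error) {
	cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	return c.CoreV1().ConfigMaps(ns).Create(ctx, cm, metav1.CreateOptions{})
}

var _ = ginkgo.Describe("ctx threading example", func() {
	var c kubernetes.Interface

	ginkgo.BeforeEach(func(ctx context.Context) {
		// The real suite gets its clientset from the e2e framework; a fake
		// clientset keeps this sketch self-contained.
		c = fake.NewSimpleClientset()
	})

	ginkgo.It("threads the spec context into API calls", func(ctx context.Context) {
		// ctx is supplied by Ginkgo and cancelled when the spec is aborted or
		// times out, so createExampleConfigMap returns promptly.
		_, err := createExampleConfigMap(ctx, c, "default")
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	})
})
```
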
sideEffectsNone := admissionregistrationv1.SideEffectClassNone - _, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + _, err := createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -923,19 +923,19 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c }) framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func registerMutatingWebhookForConfigMap(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func registerMutatingWebhookForConfigMap(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { client := f.ClientSet ginkgo.By("Registering the mutating configmap webhook via the AdmissionRegistration API") namespace := f.Namespace.Name - _, err := createMutatingWebhookConfiguration(f, &admissionregistrationv1.MutatingWebhookConfiguration{ + _, err := createMutatingWebhookConfiguration(ctx, f, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -948,16 +948,16 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, configName stri }) framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func testMutatingConfigMapWebhook(f *framework.Framework) { +func testMutatingConfigMapWebhook(ctx context.Context, f *framework.Framework) { ginkgo.By("create a configmap that should be updated by the webhook") client := f.ClientSet configMap := toBeMutatedConfigMap(f) - mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) + mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}) framework.ExpectNoError(err) expectedConfigMapData := map[string]string{ "mutation-start": "yes", @@ -969,14 +969,14 @@ func testMutatingConfigMapWebhook(f *framework.Framework) { } } -func registerMutatingWebhookForPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func registerMutatingWebhookForPod(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { client := f.ClientSet ginkgo.By("Registering the mutating pod webhook via the AdmissionRegistration API") namespace := f.Namespace.Name sideEffectsNone := admissionregistrationv1.SideEffectClassNone - _, err := createMutatingWebhookConfiguration(f, &admissionregistrationv1.MutatingWebhookConfiguration{ + _, err := createMutatingWebhookConfiguration(ctx, f, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -1013,17 +1013,17 
@@ func registerMutatingWebhookForPod(f *framework.Framework, configName string, ce }) framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func testMutatingPodWebhook(f *framework.Framework) { +func testMutatingPodWebhook(ctx context.Context, f *framework.Framework) { ginkgo.By("create a pod that should be updated by the webhook") client := f.ClientSet pod := toBeMutatedPod(f) - mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) if len(mutatedPod.Spec.InitContainers) != 1 { framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers) @@ -1052,12 +1052,12 @@ func toBeMutatedPod(f *framework.Framework) *v1.Pod { } } -func testWebhook(f *framework.Framework) { +func testWebhook(ctx context.Context, f *framework.Framework) { ginkgo.By("create a pod that should be denied by the webhook") client := f.ClientSet // Creating the pod, the request should be rejected pod := nonCompliantPod(f) - _, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := client.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectError(err, "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name) expectedErrMsg1 := "the pod contains unwanted container name" if !strings.Contains(err.Error(), expectedErrMsg1) { @@ -1072,7 +1072,7 @@ func testWebhook(f *framework.Framework) { client = f.ClientSet // Creating the pod, the request should be rejected pod = hangingPod(f) - _, err = client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = client.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectError(err, "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name) // ensure the error is webhook-related, not client-side if !strings.Contains(err.Error(), "webhook") { @@ -1083,14 +1083,14 @@ func testWebhook(f *framework.Framework) { framework.Failf("expect error %q, got %q", "deadline", err.Error()) } // ensure the pod was not actually created - if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { + if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { framework.Failf("expect notfound error looking for rejected pod, got %v", err) } ginkgo.By("create a configmap that should be denied by the webhook") // Creating the configmap, the request should be rejected configmap := nonCompliantConfigMap(f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configmap, metav1.CreateOptions{}) framework.ExpectError(err, "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name) 
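
The register* helpers above finish by scheduling teardown with ginkgo.DeferCleanup rather than an AfterEach. When the cleanup callback (or the function handed to DeferCleanup) takes a context as its first parameter, Ginkgo supplies one at cleanup time, which is why these delete calls no longer need context.TODO(). A rough sketch of that shape, using a Secret purely as a placeholder resource:

```go
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// registerExampleSecret creates a Secret and immediately schedules its removal
// with ginkgo.DeferCleanup, mirroring how the register* helpers in this file
// schedule deletion of their webhook configurations. Names are placeholders.
func registerExampleSecret(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	_, err := c.CoreV1().Secrets(ns).Create(ctx, &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	}, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	// Because the cleanup callback declares a context.Context as its first
	// parameter, Ginkgo passes a context in when it runs the cleanup, even if
	// the spec itself was interrupted. Deletion is best effort here.
	ginkgo.DeferCleanup(func(ctx context.Context) {
		_ = c.CoreV1().Secrets(ns).Delete(ctx, name, metav1.DeleteOptions{})
	})
	return nil
}
```

The patch's own variant passes the delete function plus its non-context arguments directly to DeferCleanup (wrapped in framework.IgnoreNotFound); the effect is the same, with Ginkgo filling in the leading context argument.
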
expectedErrMsg := "the configmap contains unwanted key and value" if !strings.Contains(err.Error(), expectedErrMsg) { @@ -1107,7 +1107,7 @@ func testWebhook(f *framework.Framework) { "admit": "this", }, } - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configmap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name) ginkgo.By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook") @@ -1117,7 +1117,7 @@ func testWebhook(f *framework.Framework) { } cm.Data["webhook-e2e-test"] = "webhook-disallow" } - _, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn) + _, err = updateConfigMap(ctx, client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn) framework.ExpectError(err, "update (PUT) admitted configmap %s in namespace %s to a non-compliant one should be rejected by webhook", allowedConfigMapName, f.Namespace.Name) if !strings.Contains(err.Error(), expectedErrMsg) { framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error()) @@ -1125,14 +1125,14 @@ func testWebhook(f *framework.Framework) { ginkgo.By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook") patch := nonCompliantConfigMapPatch() - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(context.TODO(), allowedConfigMapName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(ctx, allowedConfigMapName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectError(err, "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. 
Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch) if !strings.Contains(err.Error(), expectedErrMsg) { framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error()) } ginkgo.By("create a namespace that bypass the webhook") - err = createNamespace(f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{ + err = createNamespace(ctx, f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{ Name: skippedNamespaceName, Labels: map[string]string{ skipNamespaceLabelKey: skipNamespaceLabelValue, @@ -1147,17 +1147,17 @@ func testWebhook(f *framework.Framework) { ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace") configmap = nonCompliantConfigMap(f) - _, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(context.TODO(), configmap, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(ctx, configmap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName) } -func testAttachingPodWebhook(f *framework.Framework) { +func testAttachingPodWebhook(ctx context.Context, f *framework.Framework) { ginkgo.By("create a pod") client := f.ClientSet pod := toBeAttachedPod(f) - _, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := client.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name) - err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name) ginkgo.By("'kubectl attach' the pod, should be denied by the webhook") @@ -1200,7 +1200,7 @@ func failingWebhook(namespace, name string, servicePort int32) admissionregistra } } -func registerFailClosedWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func registerFailClosedWebhook(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { ginkgo.By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API") namespace := f.Namespace.Name @@ -1219,7 +1219,7 @@ func registerFailClosedWebhook(f *framework.Framework, configName string, certCt }, } - _, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + _, err := createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -1233,15 +1233,15 @@ func registerFailClosedWebhook(f *framework.Framework, configName string, certCt }) framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func testFailClosedWebhook(f *framework.Framework) { +func testFailClosedWebhook(ctx context.Context, f *framework.Framework) { client := f.ClientSet ginkgo.By("create a namespace for 
the webhook") - err := createNamespace(f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{ + err := createNamespace(ctx, f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{ Name: failNamespaceName, Labels: map[string]string{ failNamespaceLabelKey: failNamespaceLabelValue, @@ -1257,14 +1257,14 @@ func testFailClosedWebhook(f *framework.Framework) { Name: "foo", }, } - _, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(context.TODO(), configmap, metav1.CreateOptions{}) + _, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(ctx, configmap, metav1.CreateOptions{}) framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName) if !apierrors.IsInternalError(err) { framework.Failf("expect an internal error, got %#v", err) } } -func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func registerValidatingWebhookForWebhookConfigurations(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { var err error client := f.ClientSet ginkgo.By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API") @@ -1276,7 +1276,7 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c // This webhook denies all requests to Delete validating webhook configuration and // mutating webhook configuration objects. It should never be called, however, because // dynamic admission webhooks should not be called on requests involving webhook configuration objects. - _, err = createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + _, err = createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -1317,12 +1317,12 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c }) framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func registerMutatingWebhookForWebhookConfigurations(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { var err error client := f.ClientSet ginkgo.By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API") @@ -1334,7 +1334,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con // This webhook adds a label to all requests create to validating webhook configuration and // mutating webhook configuration objects. It should never be called, however, because // dynamic admission webhooks should not be called on requests involving webhook configuration objects. 
- _, err = createMutatingWebhookConfiguration(f, &admissionregistrationv1.MutatingWebhookConfiguration{ + _, err = createMutatingWebhookConfiguration(ctx, f, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -1375,7 +1375,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con }) framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } @@ -1383,7 +1383,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con // This test assumes that the deletion-rejecting webhook defined in // registerValidatingWebhookForWebhookConfigurations and the webhook-config-mutating // webhook defined in registerMutatingWebhookForWebhookConfigurations already exist. -func testWebhooksForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func testWebhooksForWebhookConfigurations(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { var err error client := f.ClientSet ginkgo.By("Creating a dummy validating-webhook-configuration object") @@ -1392,7 +1392,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str failurePolicy := admissionregistrationv1.Ignore sideEffectsNone := admissionregistrationv1.SideEffectClassNone - mutatedValidatingWebhookConfiguration, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + mutatedValidatingWebhookConfiguration, err := createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -1438,17 +1438,17 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str framework.Failf("expected %s not to be mutated by mutating webhooks but it was", configName) } - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.By("Deleting the validating-webhook-configuration, which should be possible to remove") - err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) + err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, configName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) ginkgo.By("Creating a dummy mutating-webhook-configuration object") - mutatedMutatingWebhookConfiguration, err := createMutatingWebhookConfiguration(f, &admissionregistrationv1.MutatingWebhookConfiguration{ + mutatedMutatingWebhookConfiguration, err := createMutatingWebhookConfiguration(ctx, f, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -1494,18 +1494,18 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str framework.Failf("expected %s not to be mutated by mutating webhooks but it was", configName) } - err = waitWebhookConfigurationReady(f) + err = 
waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.By("Deleting the mutating-webhook-configuration, which should be possible to remove") - err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) + err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(ctx, configName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) } -func createNamespace(f *framework.Framework, ns *v1.Namespace) error { - return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - _, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) +func createNamespace(ctx context.Context, f *framework.Framework, ns *v1.Namespace) error { + return wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) { + _, err := f.ClientSet.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) if err != nil { if strings.HasPrefix(err.Error(), "object is being deleted:") { return false, nil @@ -1606,15 +1606,15 @@ func nonCompliantConfigMapPatch() string { type updateConfigMapFn func(cm *v1.ConfigMap) -func updateConfigMap(c clientset.Interface, ns, name string, update updateConfigMapFn) (*v1.ConfigMap, error) { +func updateConfigMap(ctx context.Context, c clientset.Interface, ns, name string, update updateConfigMapFn) (*v1.ConfigMap, error) { var cm *v1.ConfigMap pollErr := wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) { var err error - if cm, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { + if cm, err = c.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{}); err != nil { return false, err } update(cm) - if cm, err = c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{}); err == nil { + if cm, err = c.CoreV1().ConfigMaps(ns).Update(ctx, cm, metav1.UpdateOptions{}); err == nil { return true, nil } // Only retry update on conflict @@ -1628,15 +1628,15 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig type updateCustomResourceFn func(cm *unstructured.Unstructured) -func updateCustomResource(c dynamic.ResourceInterface, ns, name string, update updateCustomResourceFn) (*unstructured.Unstructured, error) { +func updateCustomResource(ctx context.Context, c dynamic.ResourceInterface, ns, name string, update updateCustomResourceFn) (*unstructured.Unstructured, error) { var cr *unstructured.Unstructured pollErr := wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) { var err error - if cr, err = c.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { + if cr, err = c.Get(ctx, name, metav1.GetOptions{}); err != nil { return false, err } update(cr) - if cr, err = c.Update(context.TODO(), cr, metav1.UpdateOptions{}); err == nil { + if cr, err = c.Update(ctx, cr, metav1.UpdateOptions{}); err == nil { return true, nil } // Only retry update on conflict @@ -1648,21 +1648,21 @@ func updateCustomResource(c dynamic.ResourceInterface, ns, name string, update u return cr, pollErr } -func cleanWebhookTest(client clientset.Interface, namespaceName string) { - _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) - _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), 
deploymentName, metav1.DeleteOptions{}) - _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretName, metav1.DeleteOptions{}) - _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, metav1.DeleteOptions{}) +func cleanWebhookTest(ctx context.Context, client clientset.Interface, namespaceName string) { + _ = client.CoreV1().Services(namespaceName).Delete(ctx, serviceName, metav1.DeleteOptions{}) + _ = client.AppsV1().Deployments(namespaceName).Delete(ctx, deploymentName, metav1.DeleteOptions{}) + _ = client.CoreV1().Secrets(namespaceName).Delete(ctx, secretName, metav1.DeleteOptions{}) + _ = client.RbacV1().RoleBindings("kube-system").Delete(ctx, roleBindingName, metav1.DeleteOptions{}) } -func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) { +func registerWebhookForCustomResource(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) { client := f.ClientSet ginkgo.By("Registering the custom resource webhook via the AdmissionRegistration API") namespace := f.Namespace.Name sideEffectsNone := admissionregistrationv1.SideEffectClassNone - _, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + _, err := createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -1699,19 +1699,19 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string, }) framework.ExpectNoError(err, "registering custom resource webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func registerMutatingWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) { +func registerMutatingWebhookForCustomResource(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) { client := f.ClientSet ginkgo.By(fmt.Sprintf("Registering the mutating webhook for custom resource %s via the AdmissionRegistration API", testcrd.Crd.Name)) namespace := f.Namespace.Name sideEffectsNone := admissionregistrationv1.SideEffectClassNone - _, err := createMutatingWebhookConfiguration(f, &admissionregistrationv1.MutatingWebhookConfiguration{ + _, err := createMutatingWebhookConfiguration(ctx, f, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -1774,13 +1774,13 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName }) framework.ExpectNoError(err, "registering custom resource webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func testCustomResourceWebhook(f *framework.Framework, crd 
*apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) { +func testCustomResourceWebhook(ctx context.Context, f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) { ginkgo.By("Creating a custom resource that should be denied by the webhook") crInstanceName := "cr-instance-1" crInstance := &unstructured.Unstructured{ @@ -1796,7 +1796,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.Cust }, }, } - _, err := customResourceClient.Create(context.TODO(), crInstance, metav1.CreateOptions{}) + _, err := customResourceClient.Create(ctx, crInstance, metav1.CreateOptions{}) framework.ExpectError(err, "create custom resource %s in namespace %s should be denied by webhook", crInstanceName, f.Namespace.Name) expectedErrMsg := "the custom resource contains unwanted data" if !strings.Contains(err.Error(), expectedErrMsg) { @@ -1804,7 +1804,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.Cust } } -func testBlockingCustomResourceUpdateDeletion(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) { +func testBlockingCustomResourceUpdateDeletion(ctx context.Context, f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) { ginkgo.By("Creating a custom resource whose deletion would be denied by the webhook") crInstanceName := "cr-instance-2" crInstance := &unstructured.Unstructured{ @@ -1820,7 +1820,7 @@ func testBlockingCustomResourceUpdateDeletion(f *framework.Framework, crd *apiex }, }, } - _, err := customResourceClient.Create(context.TODO(), crInstance, metav1.CreateOptions{}) + _, err := customResourceClient.Create(ctx, crInstance, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crInstanceName, f.Namespace.Name) ginkgo.By("Updating the custom resource with disallowed data should be denied") @@ -1831,7 +1831,7 @@ func testBlockingCustomResourceUpdateDeletion(f *framework.Framework, crd *apiex data := cr.Object["data"].(map[string]interface{}) data["webhook-e2e-test"] = "webhook-disallow" } - _, err = updateCustomResource(customResourceClient, f.Namespace.Name, crInstanceName, toNonCompliantFn) + _, err = updateCustomResource(ctx, customResourceClient, f.Namespace.Name, crInstanceName, toNonCompliantFn) framework.ExpectError(err, "updating custom resource %s in namespace: %s should be denied", crInstanceName, f.Namespace.Name) expectedErrMsg := "the custom resource contains unwanted data" @@ -1840,7 +1840,7 @@ func testBlockingCustomResourceUpdateDeletion(f *framework.Framework, crd *apiex } ginkgo.By("Deleting the custom resource should be denied") - err = customResourceClient.Delete(context.TODO(), crInstanceName, metav1.DeleteOptions{}) + err = customResourceClient.Delete(ctx, crInstanceName, metav1.DeleteOptions{}) framework.ExpectError(err, "deleting custom resource %s in namespace: %s should be denied", crInstanceName, f.Namespace.Name) expectedErrMsg1 := "the custom resource cannot be deleted because it contains unwanted key and value" if !strings.Contains(err.Error(), expectedErrMsg1) { @@ -1855,16 +1855,16 @@ func testBlockingCustomResourceUpdateDeletion(f *framework.Framework, crd *apiex data := cr.Object["data"].(map[string]interface{}) data["webhook-e2e-test"] = "webhook-allow" } - _, err = updateCustomResource(customResourceClient, 
f.Namespace.Name, crInstanceName, toCompliantFn) + _, err = updateCustomResource(ctx, customResourceClient, f.Namespace.Name, crInstanceName, toCompliantFn) framework.ExpectNoError(err, "failed to update custom resource %s in namespace: %s", crInstanceName, f.Namespace.Name) ginkgo.By("Deleting the updated custom resource should be successful") - err = customResourceClient.Delete(context.TODO(), crInstanceName, metav1.DeleteOptions{}) + err = customResourceClient.Delete(ctx, crInstanceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete custom resource %s in namespace: %s", crInstanceName, f.Namespace.Name) } -func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface, prune bool) { +func testMutatingCustomResourceWebhook(ctx context.Context, f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface, prune bool) { ginkgo.By("Creating a custom resource that should be mutated by the webhook") crName := "cr-instance-1" cr := &unstructured.Unstructured{ @@ -1880,7 +1880,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension }, }, } - mutatedCR, err := customResourceClient.Create(context.TODO(), cr, metav1.CreateOptions{}) + mutatedCR, err := customResourceClient.Create(ctx, cr, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name) expectedCRData := map[string]interface{}{ "mutation-start": "yes", @@ -1894,7 +1894,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension } } -func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.TestCrd) { +func testMultiVersionCustomResourceWebhook(ctx context.Context, f *framework.Framework, testcrd *crd.TestCrd) { customResourceClient := testcrd.DynamicClients["v1"] ginkgo.By("Creating a custom resource while v1 is storage version") crName := "cr-instance-1" @@ -1911,7 +1911,7 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd. }, }, } - _, err := customResourceClient.Create(context.TODO(), cr, metav1.CreateOptions{}) + _, err := customResourceClient.Create(ctx, cr, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name) ginkgo.By("Patching Custom Resource Definition to set v2 as storage") @@ -1937,12 +1937,12 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd. 
] } }` - _, err = testcrd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch), metav1.PatchOptions{}) + _, err = testcrd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name) ginkgo.By("Patching the custom resource while v2 is storage version") crDummyPatch := fmt.Sprint(`[{ "op": "add", "path": "/dummy", "value": "test" }]`) - mutatedCR, err := testcrd.DynamicClients["v2"].Patch(context.TODO(), crName, types.JSONPatchType, []byte(crDummyPatch), metav1.PatchOptions{}) + mutatedCR, err := testcrd.DynamicClients["v2"].Patch(ctx, crName, types.JSONPatchType, []byte(crDummyPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name) expectedCRData := map[string]interface{}{ "mutation-start": "yes", @@ -1957,7 +1957,7 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd. } } -func registerValidatingWebhookForCRD(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { +func registerValidatingWebhookForCRD(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, servicePort int32) { client := f.ClientSet ginkgo.By("Registering the crd webhook via the AdmissionRegistration API") @@ -1968,7 +1968,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string, // label "webhook-e2e-test":"webhook-disallow" // NOTE: Because tests are run in parallel and in an unpredictable order, it is critical // that no other test attempts to create CRD with that label. 
- _, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + _, err := createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -2005,12 +2005,12 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string, }) framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{}) } -func testCRDDenyWebhook(f *framework.Framework) { +func testCRDDenyWebhook(ctx context.Context, f *framework.Framework) { ginkgo.By("Creating a custom resource definition that should be denied by the webhook") name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, "deny") kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, "deny") @@ -2064,7 +2064,7 @@ func testCRDDenyWebhook(f *framework.Framework) { } // create CRD - _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}) + _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{}) framework.ExpectError(err, "create custom resource definition %s should be denied by webhook", crd.Name) expectedErrMsg := "the crd contains unwanted label" if !strings.Contains(err.Error(), expectedErrMsg) { @@ -2072,7 +2072,7 @@ func testCRDDenyWebhook(f *framework.Framework) { } } -func labelNamespace(f *framework.Framework, namespace string) { +func labelNamespace(ctx context.Context, f *framework.Framework, namespace string) { client := f.ClientSet // Add a unique label to the namespace @@ -2082,18 +2082,18 @@ func labelNamespace(f *framework.Framework, namespace string) { }, }) framework.ExpectNoError(err, "error marshaling namespace %s", namespace) - _, err = client.CoreV1().Namespaces().Patch(context.TODO(), namespace, types.StrategicMergePatchType, nsPatch, metav1.PatchOptions{}) + _, err = client.CoreV1().Namespaces().Patch(ctx, namespace, types.StrategicMergePatchType, nsPatch, metav1.PatchOptions{}) framework.ExpectNoError(err, "error labeling namespace %s", namespace) } -func registerSlowWebhook(f *framework.Framework, configName string, certCtx *certContext, policy *admissionregistrationv1.FailurePolicyType, timeout *int32, servicePort int32) func() { +func registerSlowWebhook(ctx context.Context, f *framework.Framework, configName string, certCtx *certContext, policy *admissionregistrationv1.FailurePolicyType, timeout *int32, servicePort int32) func(ctx context.Context) { client := f.ClientSet ginkgo.By("Registering slow webhook via the AdmissionRegistration API") namespace := f.Namespace.Name sideEffectsNone := admissionregistrationv1.SideEffectClassNone - _, err := createValidatingWebhookConfiguration(f, &admissionregistrationv1.ValidatingWebhookConfiguration{ + _, err := createValidatingWebhookConfiguration(ctx, f, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, }, @@ -2132,24 +2132,27 @@ func registerSlowWebhook(f *framework.Framework, configName string, certCtx *cer }) framework.ExpectNoError(err, "registering slow webhook config %s with namespace %s", 
configName, namespace) - err = waitWebhookConfigurationReady(f) + err = waitWebhookConfigurationReady(ctx, f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") - cleanup := func() { - err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) + cleanup := func(ctx context.Context) { + err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, configName, metav1.DeleteOptions{}) if !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } } + // We clean up ourselves if the caller doesn't get to it, but we also + // give the caller a chance to do it in the middle of the test. + ginkgo.DeferCleanup(cleanup) return cleanup } -func testSlowWebhookTimeoutFailEarly(f *framework.Framework) { +func testSlowWebhookTimeoutFailEarly(ctx context.Context, f *framework.Framework) { ginkgo.By("Request fails when timeout (1s) is shorter than slow webhook latency (5s)") client := f.ClientSet name := "e2e-test-slow-webhook-configmap" - _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) + _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) framework.ExpectError(err, "create configmap in namespace %s should have timed-out reaching slow webhook", f.Namespace.Name) // http timeout message: context deadline exceeded // dial timeout message: dial tcp {address}: i/o timeout @@ -2160,12 +2163,12 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) { } } -func testSlowWebhookTimeoutNoError(f *framework.Framework) { +func testSlowWebhookTimeoutNoError(ctx context.Context, f *framework.Framework) { client := f.ClientSet name := "e2e-test-slow-webhook-configmap" - _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) + _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{}) framework.ExpectNoError(err) } @@ -2214,7 +2217,7 @@ func servedAPIVersions(crd *apiextensionsv1.CustomResourceDefinition) []string { // createValidatingWebhookConfiguration ensures the webhook config scopes object or namespace selection // to avoid interfering with other tests, then creates the config. 
-func createValidatingWebhookConfiguration(f *framework.Framework, config *admissionregistrationv1.ValidatingWebhookConfiguration) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) { +func createValidatingWebhookConfiguration(ctx context.Context, f *framework.Framework, config *admissionregistrationv1.ValidatingWebhookConfiguration) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) { for _, webhook := range config.Webhooks { if webhook.NamespaceSelector != nil && webhook.NamespaceSelector.MatchLabels[f.UniqueName] == "true" { continue @@ -2224,12 +2227,12 @@ func createValidatingWebhookConfiguration(f *framework.Framework, config *admiss } framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName) } - return f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), config, metav1.CreateOptions{}) + return f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(ctx, config, metav1.CreateOptions{}) } // createMutatingWebhookConfiguration ensures the webhook config scopes object or namespace selection // to avoid interfering with other tests, then creates the config. -func createMutatingWebhookConfiguration(f *framework.Framework, config *admissionregistrationv1.MutatingWebhookConfiguration) (*admissionregistrationv1.MutatingWebhookConfiguration, error) { +func createMutatingWebhookConfiguration(ctx context.Context, f *framework.Framework, config *admissionregistrationv1.MutatingWebhookConfiguration) (*admissionregistrationv1.MutatingWebhookConfiguration, error) { for _, webhook := range config.Webhooks { if webhook.NamespaceSelector != nil && webhook.NamespaceSelector.MatchLabels[f.UniqueName] == "true" { continue @@ -2239,7 +2242,7 @@ func createMutatingWebhookConfiguration(f *framework.Framework, config *admissio } framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName) } - return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), config, metav1.CreateOptions{}) + return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(ctx, config, metav1.CreateOptions{}) } func newDenyPodWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook { @@ -2341,8 +2344,8 @@ func newMutateConfigMapWebhookFixture(f *framework.Framework, certCtx *certConte // createWebhookConfigurationReadyNamespace creates a separate namespace for webhook configuration ready markers to // prevent cross-talk with webhook configurations being tested. -func createWebhookConfigurationReadyNamespace(f *framework.Framework) { - ns, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ +func createWebhookConfigurationReadyNamespace(ctx context.Context, f *framework.Framework) { + ns, err := f.ClientSet.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: f.Namespace.Name + "-markers", Labels: map[string]string{ @@ -2359,7 +2362,7 @@ func createWebhookConfigurationReadyNamespace(f *framework.Framework) { // waitWebhookConfigurationReady sends "marker" requests until a webhook configuration is ready. 
// A webhook created with newValidatingIsReadyWebhookFixture or newMutatingIsReadyWebhookFixture should first be added to // the webhook configuration. -func waitWebhookConfigurationReady(f *framework.Framework) error { +func waitWebhookConfigurationReady(ctx context.Context, f *framework.Framework) error { cmClient := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name + "-markers") return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { marker := &v1.ConfigMap{ @@ -2370,7 +2373,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { }, }, } - _, err := cmClient.Create(context.TODO(), marker, metav1.CreateOptions{}) + _, err := cmClient.Create(ctx, marker, metav1.CreateOptions{}) if err != nil { // The always-deny webhook does not provide a reason, so check for the error string we expect if strings.Contains(err.Error(), "denied") { @@ -2379,7 +2382,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { return false, err } // best effort cleanup of markers that are no longer needed - _ = cmClient.Delete(context.TODO(), marker.GetName(), metav1.DeleteOptions{}) + _ = cmClient.Delete(ctx, marker.GetName(), metav1.DeleteOptions{}) framework.Logf("Waiting for webhook configuration to be ready...") return false, nil }) diff --git a/test/e2e/apps/controller_revision.go b/test/e2e/apps/controller_revision.go index 87a499e9056..bde987f16e8 100644 --- a/test/e2e/apps/controller_revision.go +++ b/test/e2e/apps/controller_revision.go @@ -55,29 +55,29 @@ const ( var _ = SIGDescribe("ControllerRevision [Serial]", func() { var f *framework.Framework - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { // Clean up - daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "unable to dump DaemonSets") if daemonsets != nil && len(daemonsets.Items) > 0 { for _, ds := range daemonsets.Items { ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) - framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) + framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) framework.ExpectNoError(err, "error waiting for daemon pod to be reaped") } } - if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { + if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil { framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets)) } else { framework.Logf("unable to dump daemonsets: %v", err) } - if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { + if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil { framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods)) } else { framework.Logf("unable to dump 
pods: %v", err) } - err = clearDaemonSetNodeLabels(f.ClientSet) + err = clearDaemonSetNodeLabels(ctx, f.ClientSet) framework.ExpectNoError(err) }) @@ -90,17 +90,17 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { var ns string var c clientset.Interface - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns = f.Namespace.Name c = f.ClientSet - updatedNS, err := patchNamespaceAnnotations(c, ns) + updatedNS, err := patchNamespaceAnnotations(ctx, c, ns) framework.ExpectNoError(err) ns = updatedNS.Name - err = clearDaemonSetNodeLabels(c) + err = clearDaemonSetNodeLabels(ctx, c) framework.ExpectNoError(err) }) @@ -128,26 +128,26 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { dsLabelSelector := labels.SelectorFromSet(dsLabel).String() ginkgo.By(fmt.Sprintf("Creating DaemonSet %q", dsName)) - testDaemonset, err := csAppsV1.DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, dsLabel), metav1.CreateOptions{}) + testDaemonset, err := csAppsV1.DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, dsLabel), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) framework.ExpectNoError(err, "error waiting for daemon pod to start") - err = e2edaemonset.CheckDaemonStatus(f, dsName) + err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Confirm DaemonSet %q successfully created with %q label", dsName, dsLabelSelector)) - dsList, err := csAppsV1.DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: dsLabelSelector}) + dsList, err := csAppsV1.DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector}) framework.ExpectNoError(err, "failed to list Daemon Sets") framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found") - ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), dsName, metav1.GetOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, dsName, metav1.GetOptions{}) framework.ExpectNoError(err) // Listing across all namespaces to verify api endpoint: listAppsV1ControllerRevisionForAllNamespaces ginkgo.By(fmt.Sprintf("Listing all ControllerRevisions with label %q", dsLabelSelector)) - revs, err := csAppsV1.ControllerRevisions("").List(context.TODO(), metav1.ListOptions{LabelSelector: dsLabelSelector}) + revs, err := csAppsV1.ControllerRevisions("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector}) framework.ExpectNoError(err, "Failed to list ControllerRevision: %v", err) framework.ExpectEqual(len(revs.Items), 1, "Failed to find any controllerRevisions") @@ -158,14 +158,14 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { oref := rev.OwnerReferences[0] if oref.Kind == "DaemonSet" && oref.UID == ds.UID { framework.Logf("Located ControllerRevision: %q", rev.Name) - initialRevision, err = csAppsV1.ControllerRevisions(ns).Get(context.TODO(), rev.Name, metav1.GetOptions{}) + initialRevision, err = csAppsV1.ControllerRevisions(ns).Get(ctx, rev.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to lookup ControllerRevision: %v", err) framework.ExpectNotEqual(initialRevision, nil, "failed to lookup ControllerRevision: %v", initialRevision) } ginkgo.By(fmt.Sprintf("Patching ControllerRevision %q", 
initialRevision.Name)) payload := "{\"metadata\":{\"labels\":{\"" + initialRevision.Name + "\":\"patched\"}}}" - patchedControllerRevision, err := csAppsV1.ControllerRevisions(ns).Patch(context.TODO(), initialRevision.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) + patchedControllerRevision, err := csAppsV1.ControllerRevisions(ns).Patch(ctx, initialRevision.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch ControllerRevision %s in namespace %s", initialRevision.Name, ns) framework.ExpectEqual(patchedControllerRevision.Labels[initialRevision.Name], "patched", "Did not find 'patched' label for this ControllerRevision. Current labels: %v", patchedControllerRevision.Labels) framework.Logf("%s has been patched", patchedControllerRevision.Name) @@ -184,33 +184,33 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { Data: initialRevision.Data, Revision: initialRevision.Revision + 1, } - newControllerRevision, err := csAppsV1.ControllerRevisions(ns).Create(context.TODO(), newRevision, metav1.CreateOptions{}) + newControllerRevision, err := csAppsV1.ControllerRevisions(ns).Create(ctx, newRevision, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create ControllerRevision: %v", err) framework.Logf("Created ControllerRevision: %s", newControllerRevision.Name) ginkgo.By("Confirm that there are two ControllerRevisions") - err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) + err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) framework.ExpectNoError(err, "failed to count required ControllerRevisions") ginkgo.By(fmt.Sprintf("Deleting ControllerRevision %q", initialRevision.Name)) - err = csAppsV1.ControllerRevisions(ns).Delete(context.TODO(), initialRevision.Name, metav1.DeleteOptions{}) + err = csAppsV1.ControllerRevisions(ns).Delete(ctx, initialRevision.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err) ginkgo.By("Confirm that there is only one ControllerRevision") - err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) + err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) framework.ExpectNoError(err, "failed to count required ControllerRevisions") - listControllerRevisions, err := csAppsV1.ControllerRevisions(ns).List(context.TODO(), metav1.ListOptions{}) + listControllerRevisions, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{}) currentControllerRevision := listControllerRevisions.Items[0] ginkgo.By(fmt.Sprintf("Updating ControllerRevision %q", currentControllerRevision.Name)) var updatedControllerRevision *appsv1.ControllerRevision err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Get(context.TODO(), currentControllerRevision.Name, metav1.GetOptions{}) + updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Get(ctx, currentControllerRevision.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get ControllerRevision %s", currentControllerRevision.Name) 
updatedControllerRevision.Labels[currentControllerRevision.Name] = "updated" - updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Update(context.TODO(), updatedControllerRevision, metav1.UpdateOptions{}) + updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Update(ctx, updatedControllerRevision, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "failed to update ControllerRevision in namespace: %s", ns) @@ -220,38 +220,38 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { ginkgo.By("Generate another ControllerRevision by patching the Daemonset") patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"terminationGracePeriodSeconds": %d}}},"updateStrategy":{"type":"RollingUpdate"}}`, 1) - _, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) + _, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err, "error patching daemon set") ginkgo.By("Confirm that there are two ControllerRevisions") - err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) + err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) framework.ExpectNoError(err, "failed to count required ControllerRevisions") updatedLabel := map[string]string{updatedControllerRevision.Name: "updated"} updatedLabelSelector := labels.SelectorFromSet(updatedLabel).String() ginkgo.By(fmt.Sprintf("Removing a ControllerRevision via 'DeleteCollection' with labelSelector: %q", updatedLabelSelector)) - err = csAppsV1.ControllerRevisions(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: updatedLabelSelector}) + err = csAppsV1.ControllerRevisions(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: updatedLabelSelector}) framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err) ginkgo.By("Confirm that there is only one ControllerRevision") - err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) + err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) framework.ExpectNoError(err, "failed to count required ControllerRevisions") - list, err := csAppsV1.ControllerRevisions(ns).List(context.TODO(), metav1.ListOptions{}) + list, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list ControllerRevision") framework.ExpectEqual(list.Items[0].Revision, int64(3), "failed to find the expected revision for the Controller") framework.Logf("ControllerRevision %q has revision %d", list.Items[0].Name, list.Items[0].Revision) }) }) -func checkControllerRevisionListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) { - return func() (bool, error) { +func checkControllerRevisionListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { var err error framework.Logf("Requesting list of ControllerRevisions to confirm quantity") - list, err := 
f.ClientSet.AppsV1().ControllerRevisions(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + list, err := f.ClientSet.AppsV1().ControllerRevisions(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: label}) if err != nil { return false, err diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index ef98644da0e..5fbc5cf4186 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -70,21 +70,21 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Creating a cronjob") cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent, sleepCommand, nil, nil) - cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) + cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring more than one job is running at a time") - err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2) + err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 2) framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 2)) ginkgo.By("Removing cronjob") - err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) @@ -99,20 +99,20 @@ var _ = SIGDescribe("CronJob", func() { sleepCommand, nil, nil) t := true cronJob.Spec.Suspend = &t - cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) + cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring no jobs are scheduled") - err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false) + err = waitForNoJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, false) framework.ExpectError(err) ginkgo.By("Ensuring no job exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) gomega.Expect(jobs.Items).To(gomega.HaveLen(0)) ginkgo.By("Removing cronjob") - err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) @@ -125,30 +125,30 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Creating a ForbidConcurrent cronjob") cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent, sleepCommand, nil, nil) - cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) + cronJob, err := createCronJob(ctx, f.ClientSet, 
f.Namespace.Name, cronJob) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring a job is scheduled") - err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) + err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1) framework.ExpectNoError(err, "Failed to schedule CronJob %s", cronJob.Name) ginkgo.By("Ensuring exactly one is scheduled") - cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name) gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) gomega.Expect(activeJobs).To(gomega.HaveLen(1)) ginkgo.By("Ensuring no more jobs are scheduled") - err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2) + err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 2) framework.ExpectError(err) ginkgo.By("Removing cronjob") - err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) @@ -161,30 +161,30 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Creating a ReplaceConcurrent cronjob") cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1.ReplaceConcurrent, sleepCommand, nil, nil) - cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) + cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring a job is scheduled") - err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) + err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1) framework.ExpectNoError(err, "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) ginkgo.By("Ensuring exactly one is scheduled") - cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name) gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the jobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) gomega.Expect(activeJobs).To(gomega.HaveLen(1)) ginkgo.By("Ensuring the job is replaced with a new one") - err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name) + err = waitForJobReplaced(ctx, f.ClientSet, f.Namespace.Name, jobs.Items[0].Name) framework.ExpectNoError(err, "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, 
f.Namespace.Name) ginkgo.By("Removing cronjob") - err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) @@ -196,21 +196,21 @@ var _ = SIGDescribe("CronJob", func() { lastScheduleTime := creationTime.Add(1 * 24 * time.Hour) cronJob.CreationTimestamp = metav1.Time{Time: creationTime} cronJob.Status.LastScheduleTime = &metav1.Time{Time: lastScheduleTime} - cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) + cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring one job is running") - err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) + err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1) framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) ginkgo.By("Ensuring at least one running jobs exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 1)) ginkgo.By("Removing cronjob") - err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) @@ -219,21 +219,21 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Creating a cronjob") cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent, nil, nil, nil) - cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) + cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly") - err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2) + err = waitForJobsAtLeast(ctx, f.ClientSet, f.Namespace.Name, 2) framework.ExpectNoError(err, "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name) - err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name) + err = waitForAnyFinishedJob(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err, "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring no unexpected event has happened") - err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"}) + err = waitForEventWithReason(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"}) framework.ExpectError(err) ginkgo.By("Removing cronjob") - err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) @@ -242,37 +242,37 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Creating 
a ForbidConcurrent cronjob") cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent, sleepCommand, nil, nil) - cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) + cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring a job is scheduled") - err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) + err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1) framework.ExpectNoError(err, "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name) ginkgo.By("Ensuring exactly one is scheduled") - cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name) gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) ginkgo.By("Deleting the job") job := cronJob.Status.Active[0] - framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) + framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) ginkgo.By("Ensuring job was deleted") - _, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) + _, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectError(err) framework.ExpectEqual(apierrors.IsNotFound(err), true) ginkgo.By("Ensuring the job is not in the cronjob active list") - err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name) + err = waitForJobNotActive(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name) framework.ExpectNoError(err, "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name) ginkgo.By("Ensuring MissingJob event has occurred") - err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"}) + err = waitForEventWithReason(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"}) framework.ExpectNoError(err, "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) ginkgo.By("Removing cronjob") - err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) + err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name) framework.ExpectNoError(err, "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) }) @@ -284,7 +284,7 @@ var _ = SIGDescribe("CronJob", func() { cronJob := newTestCronJob("successful-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent, successCommand, &successLimit, &failedLimit) - ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob) + ensureHistoryLimits(ctx, f.ClientSet, f.Namespace.Name, cronJob) }) // cleanup of failed finished jobs, with limit of one failed job @@ -295,7 +295,7 @@ var _ = SIGDescribe("CronJob", func() { cronJob := newTestCronJob("failed-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent, failureCommand, &successLimit, &failedLimit) - ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob) + ensureHistoryLimits(ctx, f.ClientSet, f.Namespace.Name, cronJob) }) ginkgo.It("should support timezone", func(ctx context.Context) { @@ -304,7 +304,7 @@ var _ = 
SIGDescribe("CronJob", func() { failureCommand, nil, nil) badTimeZone := "bad-time-zone" cronJob.Spec.TimeZone = &badTimeZone - _, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) + _, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob) framework.ExpectError(err, "CronJob creation should fail with invalid time zone error") framework.ExpectEqual(apierrors.IsInvalid(err), true, "CronJob creation should fail with invalid time zone error") }) @@ -331,38 +331,38 @@ var _ = SIGDescribe("CronJob", func() { cjClient := f.ClientSet.BatchV1().CronJobs(ns) ginkgo.By("creating") - createdCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{}) + createdCronJob, err := cjClient.Create(ctx, cjTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("getting") - gottenCronJob, err := cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}) + gottenCronJob, err := cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(gottenCronJob.UID, createdCronJob.UID) ginkgo.By("listing") - cjs, err := cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + cjs, err := cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(cjs.Items), 1, "filtered list should have 1 item") ginkgo.By("watching") framework.Logf("starting watch") - cjWatch, err := cjClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) + cjWatch, err := cjClient.Watch(ctx, metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) // Test cluster-wide list and watch clusterCJClient := f.ClientSet.BatchV1().CronJobs("") ginkgo.By("cluster-wide listing") - clusterCJs, err := clusterCJClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + clusterCJs, err := clusterCJClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(clusterCJs.Items), 1, "filtered list should have 1 items") ginkgo.By("cluster-wide watching") framework.Logf("starting watch") - _, err = clusterCJClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) + _, err = clusterCJClient.Watch(ctx, metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) ginkgo.By("patching") - patchedCronJob, err := cjClient.Patch(context.TODO(), createdCronJob.Name, types.MergePatchType, + patchedCronJob, err := cjClient.Patch(ctx, createdCronJob.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedCronJob.Annotations["patched"], "true", "patched object should have the applied annotation") @@ -370,12 +370,12 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("updating") var cjToUpdate, updatedCronJob *batchv1.CronJob err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - cjToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}) + cjToUpdate, err = cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{}) if err != nil { return err } 
cjToUpdate.Annotations["updated"] = "true" - updatedCronJob, err = cjClient.Update(context.TODO(), cjToUpdate, metav1.UpdateOptions{}) + updatedCronJob, err = cjClient.Update(ctx, cjToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err) @@ -410,7 +410,7 @@ var _ = SIGDescribe("CronJob", func() { } cjStatusJSON, err := json.Marshal(cjStatus) framework.ExpectNoError(err) - patchedStatus, err := cjClient.Patch(context.TODO(), createdCronJob.Name, types.MergePatchType, + patchedStatus, err := cjClient.Patch(ctx, createdCronJob.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(cjStatusJSON)+`}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) @@ -422,12 +422,12 @@ var _ = SIGDescribe("CronJob", func() { now2 := metav1.Now().Rfc3339Copy() var statusToUpdate, updatedStatus *batchv1.CronJob err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - statusToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}) + statusToUpdate, err = cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{}) if err != nil { return err } statusToUpdate.Status.LastScheduleTime = &now2 - updatedStatus, err = cjClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err = cjClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err) @@ -435,7 +435,7 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("get /status") cjResource := schema.GroupVersionResource{Group: "batch", Version: cjVersion, Resource: "cronjobs"} - gottenStatus, err := f.DynamicClient.Resource(cjResource).Namespace(ns).Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}, "status") + gottenStatus, err := f.DynamicClient.Resource(cjResource).Namespace(ns).Get(ctx, createdCronJob.Name, metav1.GetOptions{}, "status") framework.ExpectNoError(err) statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid") framework.ExpectNoError(err) @@ -449,11 +449,11 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("deleting") cjTemplate.Name = "for-removal" - forRemovalCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{}) + forRemovalCronJob, err := cjClient.Create(ctx, cjTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = cjClient.Delete(context.TODO(), forRemovalCronJob.Name, metav1.DeleteOptions{}) + err = cjClient.Delete(ctx, forRemovalCronJob.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - cj, err := cjClient.Get(context.TODO(), forRemovalCronJob.Name, metav1.GetOptions{}) + cj, err := cjClient.Get(ctx, forRemovalCronJob.Name, metav1.GetOptions{}) // If controller does not support finalizers, we expect a 404. Otherwise we validate finalizer behavior. 
if err == nil { expectFinalizer(cj, "deleting cronjob") @@ -462,9 +462,9 @@ var _ = SIGDescribe("CronJob", func() { } ginkgo.By("deleting a collection") - err = cjClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + err = cjClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) - cjs, err = cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + cjs, err = cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) // Should have <= 2 items since some cronjobs might not have been deleted yet due to finalizers framework.ExpectEqual(len(cjs.Items) <= 2, true, "filtered list should be <= 2") @@ -476,19 +476,19 @@ var _ = SIGDescribe("CronJob", func() { }) -func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.CronJob) { - cronJob, err := createCronJob(c, ns, cronJob) +func ensureHistoryLimits(ctx context.Context, c clientset.Interface, ns string, cronJob *batchv1.CronJob) { + cronJob, err := createCronJob(ctx, c, ns, cronJob) framework.ExpectNoError(err, "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", ns) // Job is going to complete instantly: do not check for an active job // as we are most likely to miss it ginkgo.By("Ensuring a finished job exists") - err = waitForAnyFinishedJob(c, ns) + err = waitForAnyFinishedJob(ctx, c, ns) framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists in namespace %s", ns) ginkgo.By("Ensuring a finished job exists by listing jobs explicitly") - jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) + jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", ns) activeJobs, finishedJobs := filterActiveJobs(jobs) if len(finishedJobs) != 1 { @@ -498,13 +498,13 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.Cron // Job should get deleted when the next job finishes the next minute ginkgo.By("Ensuring this job and its pods does not exist anymore") - err = waitForJobToDisappear(c, ns, finishedJobs[0]) + err = waitForJobToDisappear(ctx, c, ns, finishedJobs[0]) framework.ExpectNoError(err, "Failed to ensure that job does not exists anymore in namespace %s", ns) - err = waitForJobsPodToDisappear(c, ns, finishedJobs[0]) + err = waitForJobsPodToDisappear(ctx, c, ns, finishedJobs[0]) framework.ExpectNoError(err, "Failed to ensure that pods for job does not exists anymore in namespace %s", ns) ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly") - jobs, err = c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) + jobs, err = c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to ensure there is one finished job by listing job explicitly in namespace %s", ns) activeJobs, finishedJobs = filterActiveJobs(jobs) if len(finishedJobs) != 1 { @@ -513,7 +513,7 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.Cron } ginkgo.By("Removing cronjob") - err = deleteCronJob(c, ns, cronJob.Name) + err = deleteCronJob(ctx, c, ns, cronJob.Name) framework.ExpectNoError(err, "Failed to remove the %s cronjob in namespace %s", cronJob.Name, ns) } @@ -575,23 +575,23 @@ func 
newTestCronJob(name, schedule string, concurrencyPolicy batchv1.Concurrency return sj } -func createCronJob(c clientset.Interface, ns string, cronJob *batchv1.CronJob) (*batchv1.CronJob, error) { - return c.BatchV1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{}) +func createCronJob(ctx context.Context, c clientset.Interface, ns string, cronJob *batchv1.CronJob) (*batchv1.CronJob, error) { + return c.BatchV1().CronJobs(ns).Create(ctx, cronJob, metav1.CreateOptions{}) } -func getCronJob(c clientset.Interface, ns, name string) (*batchv1.CronJob, error) { - return c.BatchV1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) +func getCronJob(ctx context.Context, c clientset.Interface, ns, name string) (*batchv1.CronJob, error) { + return c.BatchV1().CronJobs(ns).Get(ctx, name, metav1.GetOptions{}) } -func deleteCronJob(c clientset.Interface, ns, name string) error { +func deleteCronJob(ctx context.Context, c clientset.Interface, ns, name string) error { propagationPolicy := metav1.DeletePropagationBackground // Also delete jobs and pods related to cronjob - return c.BatchV1().CronJobs(ns).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + return c.BatchV1().CronJobs(ns).Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) } // Wait for at least given amount of active jobs. -func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error { - return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - curr, err := getCronJob(c, ns, cronJobName) +func waitForActiveJobs(ctx context.Context, c clientset.Interface, ns, cronJobName string, active int) error { + return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) { + curr, err := getCronJob(ctx, c, ns, cronJobName) if err != nil { return false, err } @@ -603,9 +603,9 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int // When failIfNonEmpty is set, this fails if the active set of jobs is still non-empty after // the timeout. When failIfNonEmpty is not set, this fails if the active set of jobs is still // empty after the timeout. 
-func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error { - return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - curr, err := getCronJob(c, ns, jobName) +func waitForNoJobs(ctx context.Context, c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error { + return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) { + curr, err := getCronJob(ctx, c, ns, jobName) if err != nil { return false, err } @@ -618,9 +618,9 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo } // Wait till a given job actually goes away from the Active list for a given cronjob -func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) error { - return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - curr, err := getCronJob(c, ns, cronJobName) +func waitForJobNotActive(ctx context.Context, c clientset.Interface, ns, cronJobName, jobName string) error { + return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) { + curr, err := getCronJob(ctx, c, ns, cronJobName) if err != nil { return false, err } @@ -635,9 +635,9 @@ func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) } // Wait for a job to disappear by listing them explicitly. -func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error { - return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) +func waitForJobToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error { + return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) { + jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -652,10 +652,10 @@ func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1. } // Wait for a pod to disappear by listing them explicitly. -func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error { - return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { +func waitForJobsPodToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error { + return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) { options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)} - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) + pods, err := c.CoreV1().Pods(ns).List(ctx, options) if err != nil { return false, err } @@ -664,9 +664,9 @@ func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batc } // Wait for a job to be replaced with a new one. 
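The wait.Poll -> wait.PollWithContext conversions above all follow the same shape: the context becomes the first argument and the condition becomes a func(ctx context.Context) (bool, error) that uses the per-attempt ctx for its API call. A minimal sketch under this file's imports; waitForCronJobSuspended is an illustrative helper name, not a function from this patch:

func waitForCronJobSuspended(ctx context.Context, c clientset.Interface, ns, name string) error {
	return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
		// The API call inherits cancellation from the poll's context, so an
		// aborted test stops the wait immediately.
		cj, err := getCronJob(ctx, c, ns, name)
		if err != nil {
			return false, err
		}
		return cj.Spec.Suspend != nil && *cj.Spec.Suspend, nil
	})
}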
-func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error { - return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) +func waitForJobReplaced(ctx context.Context, c clientset.Interface, ns, previousJobName string) error { + return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) { + jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -683,9 +683,9 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error } // waitForJobsAtLeast waits for at least a number of jobs to appear. -func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { - return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) +func waitForJobsAtLeast(ctx context.Context, c clientset.Interface, ns string, atLeast int) error { + return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) { + jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -694,9 +694,9 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { } // waitForAnyFinishedJob waits for any completed job to appear. -func waitForAnyFinishedJob(c clientset.Interface, ns string) error { - return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) +func waitForAnyFinishedJob(ctx context.Context, c clientset.Interface, ns string) error { + return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) { + jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -710,9 +710,9 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error { } // waitForEventWithReason waits for events with a reason within a list has occurred -func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error { - return wait.Poll(framework.Poll, 30*time.Second, func() (bool, error) { - sj, err := getCronJob(c, ns, cronJobName) +func waitForEventWithReason(ctx context.Context, c clientset.Interface, ns, cronJobName string, reasons []string) error { + return wait.PollWithContext(ctx, framework.Poll, 30*time.Second, func(ctx context.Context) (bool, error) { + sj, err := getCronJob(ctx, c, ns, cronJobName) if err != nil { return false, err } diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 62f63d4da45..c9845b143d1 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -97,7 +97,7 @@ func (r *RestartDaemonConfig) String() string { } // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout -func (r *RestartDaemonConfig) waitUp() { +func (r *RestartDaemonConfig) waitUp(ctx context.Context) { framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r) nullDev := "/dev/null" if framework.NodeOSDistroIs("windows") { @@ -112,8 +112,8 @@ func (r *RestartDaemonConfig) waitUp() { "curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort) } - err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) { - result, err := 
e2essh.NodeExec(r.nodeName, healthzCheck, framework.TestContext.Provider) + err := wait.PollWithContext(ctx, r.pollInterval, r.pollTimeout, func(ctx context.Context) (bool, error) { + result, err := e2essh.NodeExec(ctx, r.nodeName, healthzCheck, framework.TestContext.Provider) if err != nil { return false, err } @@ -133,21 +133,21 @@ func (r *RestartDaemonConfig) waitUp() { } // kill sends a SIGTERM to the daemon -func (r *RestartDaemonConfig) kill() { +func (r *RestartDaemonConfig) kill(ctx context.Context) { killCmd := fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName) if framework.NodeOSDistroIs("windows") { killCmd = fmt.Sprintf("taskkill /im %v.exe /f", r.daemonName) } framework.Logf("Killing %v", r) - _, err := e2essh.NodeExec(r.nodeName, killCmd, framework.TestContext.Provider) + _, err := e2essh.NodeExec(ctx, r.nodeName, killCmd, framework.TestContext.Provider) framework.ExpectNoError(err) } // Restart checks if the daemon is up, kills it, and waits till it comes back up -func (r *RestartDaemonConfig) restart() { - r.waitUp() - r.kill() - r.waitUp() +func (r *RestartDaemonConfig) restart(ctx context.Context) { + r.waitUp(ctx) + r.kill(ctx) + r.waitUp(ctx) } // podTracker records a serial history of events that might've affects pods. @@ -190,9 +190,9 @@ func replacePods(pods []*v1.Pod, store cache.Store) { // getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector, // and a list of nodenames across which these containers restarted. -func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) { +func getContainerRestarts(ctx context.Context, c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) { options := metav1.ListOptions{LabelSelector: labelSelector.String()} - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) + pods, err := c.CoreV1().Pods(ns).List(ctx, options) framework.ExpectNoError(err) failedContainers := 0 containerRestartNodes := sets.NewString() @@ -219,7 +219,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { var stopCh chan struct{} var tracker *podTracker - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // These tests require SSH e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) 
ns = f.Namespace.Name @@ -234,7 +234,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { Replicas: numPods, CreatedPods: &[]*v1.Pod{}, } - framework.ExpectNoError(e2erc.RunRC(config)) + framework.ExpectNoError(e2erc.RunRC(ctx, config)) replacePods(*config.CreatedPods, existingPods) stopCh = make(chan struct{}) @@ -243,12 +243,12 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labelSelector.String() - obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector.String() - return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options) }, }, &v1.Pod{}, @@ -278,14 +278,14 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { e2eskipper.SkipUnlessProviderIs("gce", "aws") restarter := NewRestartConfig( framework.APIAddress(), "kube-controller", ports.KubeControllerManagerPort, restartPollInterval, restartTimeout, true) - restarter.restart() + restarter.restart(ctx) // The intent is to ensure the replication controller manager has observed and reported status of // the replication controller at least once since the manager restarted, so that we can determine // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // to the same size achieves this, because the scale operation advances the RC's sequence number // and awaits it to be observed and reported back in the RC's status. - e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true) + e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true) // Only check the keys, the pods can be different if the kubelet updated it. // TODO: Can it really? @@ -312,39 +312,39 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { // Create pods while the scheduler is down and make sure the scheduler picks them up by // scaling the rc to the same size. - restarter.waitUp() - restarter.kill() + restarter.waitUp(ctx) + restarter.kill(ctx) // This is best effort to try and create pods while the scheduler is down, // since we don't know exactly when it is restarted after the kill signal. 
- framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false)) - restarter.waitUp() - framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true)) + framework.ExpectNoError(e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false)) + restarter.waitUp(ctx) + framework.ExpectNoError(e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true)) }) ginkgo.It("Kubelet should not restart containers across restart", func(ctx context.Context) { - nodeIPs, err := e2enode.GetPublicIps(f.ClientSet) + nodeIPs, err := e2enode.GetPublicIps(ctx, f.ClientSet) if err != nil { framework.Logf("Unexpected error occurred: %v", err) } framework.ExpectNoErrorWithOffset(0, err) - preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) + preRestarts, badNodes := getContainerRestarts(ctx, f.ClientSet, ns, labelSelector) if preRestarts != 0 { framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes) } for _, ip := range nodeIPs { restarter := NewRestartConfig( ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout, false) - restarter.restart() + restarter.restart(ctx) } - postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) + postRestarts, badNodes := getContainerRestarts(ctx, f.ClientSet, ns, labelSelector) if postRestarts != preRestarts { - e2edebug.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf) + e2edebug.DumpNodeDebugInfo(ctx, f.ClientSet, badNodes, framework.Logf) framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) } }) ginkgo.It("Kube-proxy should recover after being killed accidentally", func(ctx context.Context) { - nodeIPs, err := e2enode.GetPublicIps(f.ClientSet) + nodeIPs, err := e2enode.GetPublicIps(ctx, f.ClientSet) if err != nil { framework.Logf("Unexpected error occurred: %v", err) } @@ -353,7 +353,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout, false) // restart method will kill the kube-proxy process and wait for recovery, // if not able to recover, will throw test failure. - restarter.restart() + restarter.restart(ctx) } }) }) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 0a8ee35c7c1..b077ce32a51 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -78,16 +78,16 @@ type updateDSFunc func(*appsv1.DaemonSet) // updateDaemonSetWithRetries updates daemonsets with the given applyUpdate func // until it succeeds or a timeout expires. 
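In daemon_set.go below, both halves of each wait change together: wait.PollImmediate becomes wait.PollImmediateWithContext, and the condition factories it consumes (checkRunningOnAllNodes, checkRunningOnNoNodes, waitFailedDaemonPodDeleted, ...) return context-aware conditions instead of plain func() (bool, error). A sketch of that pairing; podGoneSketch is an illustrative factory, not a helper from this patch:

func podGoneSketch(c clientset.Interface, ns, name string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		// The poll hands its per-attempt ctx to the condition, so the Get
		// call is cancelled together with the test.
		_, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		return false, err
	}
}

// Consumed as, for example:
//   err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, podGoneSketch(c, ns, podName))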
-func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) { +func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) { daemonsets := c.AppsV1().DaemonSets(namespace) var updateErr error - pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - if ds, err = daemonsets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { + pollErr := wait.PollImmediateWithContext(ctx, 10*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { + if ds, err = daemonsets.Get(ctx, name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(ds) - if ds, err = daemonsets.Update(context.TODO(), ds, metav1.UpdateOptions{}); err == nil { + if ds, err = daemonsets.Update(ctx, ds, metav1.UpdateOptions{}); err == nil { framework.Logf("Updating DaemonSet %s", name) return true, nil } @@ -108,29 +108,29 @@ func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a var _ = SIGDescribe("Daemon set [Serial]", func() { var f *framework.Framework - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { // Clean up - daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "unable to dump DaemonSets") if daemonsets != nil && len(daemonsets.Items) > 0 { for _, ds := range daemonsets.Items { ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) - framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) + framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) framework.ExpectNoError(err, "error waiting for daemon pod to be reaped") } } - if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { + if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil { framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets)) } else { framework.Logf("unable to dump daemonsets: %v", err) } - if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { + if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil { framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods)) } else { framework.Logf("unable to dump pods: %v", err) } - err = clearDaemonSetNodeLabels(f.ClientSet) + err = clearDaemonSetNodeLabels(ctx, f.ClientSet) framework.ExpectNoError(err) }) @@ -143,17 +143,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { var ns string var c clientset.Interface - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns = 
f.Namespace.Name c = f.ClientSet - updatedNS, err := patchNamespaceAnnotations(c, ns) + updatedNS, err := patchNamespaceAnnotations(ctx, c, ns) framework.ExpectNoError(err) ns = updatedNS.Name - err = clearDaemonSetNodeLabels(c) + err = clearDaemonSetNodeLabels(ctx, c) framework.ExpectNoError(err) }) @@ -167,21 +167,21 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label), metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSet(dsName, image, label), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") - err = e2edaemonset.CheckDaemonStatus(f, dsName) + err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.") - podList := listDaemonPods(c, ns, label) + podList := listDaemonPods(ctx, c, ns, label) pod := podList.Items[0] - err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(ctx, pod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") }) @@ -197,42 +197,42 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating daemon %q with a node selector", dsName) ds := newDaemonSet(dsName, image, complexLabel) ds.Spec.Template.Spec.NodeSelector = nodeSelector - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes") ginkgo.By("Change node label to blue, check that daemon pod is launched.") - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) - newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector) + newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector) framework.ExpectNoError(err, "error setting labels on node") daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) framework.ExpectEqual(len(daemonSetLabels), 1) - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on 
new nodes") - err = e2edaemonset.CheckDaemonStatus(f, dsName) + err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled") nodeSelector[daemonsetColorLabel] = "green" - greenNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector) + greenNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector) framework.ExpectNoError(err, "error removing labels on node") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes") ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel]) - ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err, "error patching daemon set") daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) framework.ExpectEqual(len(daemonSetLabels), 1) - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name})) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") - err = e2edaemonset.CheckDaemonStatus(f, dsName) + err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) }) @@ -260,29 +260,29 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { }, }, } - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes") ginkgo.By("Change node label to blue, check that daemon pod is launched.") - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) - newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector) + newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector) framework.ExpectNoError(err, "error setting labels on node") daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) framework.ExpectEqual(len(daemonSetLabels), 1) - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") - err = 
e2edaemonset.CheckDaemonStatus(f, dsName) + err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) ginkgo.By("Remove the node label and wait for daemons to be unscheduled") - _, err = setDaemonSetNodeLabels(c, node.Name, map[string]string{}) + _, err = setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{}) framework.ExpectNoError(err, "error removing labels on node") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes") }) @@ -295,27 +295,27 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label), metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSet(dsName, image, label), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") - err = e2edaemonset.CheckDaemonStatus(f, dsName) + err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.") - podList := listDaemonPods(c, ns, label) + podList := listDaemonPods(ctx, c, ns, label) pod := podList.Items[0] pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed - _, err = c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), &pod, metav1.UpdateOptions{}) + _, err = c.CoreV1().Pods(ns).UpdateStatus(ctx, &pod, metav1.UpdateOptions{}) framework.ExpectNoError(err, "error failing a daemon pod") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") ginkgo.By("Wait for the failed daemon pod to be completely deleted.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod)) framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted") }) @@ -327,43 +327,43 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating simple daemon set %s", dsName) ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType} - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // 
Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - waitForHistoryCreated(c, ns, label, 1) - first := curHistory(listDaemonHistories(c, ns, label), ds) + waitForHistoryCreated(ctx, c, ns, label, 1) + first := curHistory(listDaemonHistories(ctx, c, ns, label), ds) firstHash := first.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] framework.ExpectEqual(first.Revision, int64(1)) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash) + checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash) ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage) - ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods images aren't updated.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods are still running on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - waitForHistoryCreated(c, ns, label, 2) - cur := curHistory(listDaemonHistories(c, ns, label), ds) + waitForHistoryCreated(ctx, c, ns, label, 2) + cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds) framework.ExpectEqual(cur.Revision, int64(2)) framework.ExpectNotEqual(cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey], firstHash) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash) + checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash) }) /* @@ -377,50 +377,50 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating simple daemon set %s", dsName) ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - waitForHistoryCreated(c, 
ns, label, 1) - cur := curHistory(listDaemonHistories(c, ns, label), ds) + waitForHistoryCreated(ctx, c, ns, label, 1) + cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds) hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] framework.ExpectEqual(cur.Revision, int64(1)) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) + checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash) ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage) - ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err) // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. // Get the number of nodes, and set the timeout appropriately. - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) nodeCount := len(nodes.Items) retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second ginkgo.By("Check that daemon pods images are updated.") - err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1)) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods are still running on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - waitForHistoryCreated(c, ns, label, 2) - cur = curHistory(listDaemonHistories(c, ns, label), ds) + waitForHistoryCreated(ctx, c, ns, label, 2) + cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds) hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] framework.ExpectEqual(cur.Revision, int64(2)) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) + checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash) }) /* @@ -430,33 +430,33 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { rollback of updates to a DaemonSet. 
*/ framework.ConformanceIt("should rollback without unnecessary restarts", func(ctx context.Context) { - schedulableNodes, err := e2enode.GetReadySchedulableNodes(c) + schedulableNodes, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.") framework.Logf("Create a RollingUpdate DaemonSet") label := map[string]string{daemonsetNameLabel: dsName} ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} - ds, err = c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Check that daemon pods launch on every node of the cluster") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.Logf("Update the DaemonSet to trigger a rollout") // We use a nonexistent image here, so that we make sure it won't finish newImage := "foo:non-existent" - newDS, err := updateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) { + newDS, err := updateDaemonSetWithRetries(ctx, c, ns, ds.Name, func(update *appsv1.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) framework.ExpectNoError(err) // Make sure we're in the middle of a rollout - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage)) framework.ExpectNoError(err) - pods := listDaemonPods(c, ns, label) + pods := listDaemonPods(ctx, c, ns, label) var existingPods, newPods []*v1.Pod for i := range pods.Items { pod := pods.Items[i] @@ -470,7 +470,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Failf("unexpected pod found, image = %s", image) } } - schedulableNodes, err = e2enode.GetReadySchedulableNodes(c) + schedulableNodes, err = e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) if len(schedulableNodes.Items) < 2 { framework.ExpectEqual(len(existingPods), 0) @@ -480,17 +480,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNotEqual(len(newPods), 0) framework.Logf("Roll back the DaemonSet before rollout is complete") - rollbackDS, err := updateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) { + rollbackDS, err := updateDaemonSetWithRetries(ctx, c, ns, ds.Name, func(update *appsv1.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = image }) framework.ExpectNoError(err) framework.Logf("Make sure DaemonSet rollback is complete") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1)) framework.ExpectNoError(err) // After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted - pods = listDaemonPods(c, ns, label) + pods = listDaemonPods(ctx, c, ns, label) rollbackPods := map[string]bool{} for _, pod := range 
pods.Items { rollbackPods[pod.Name] = true @@ -545,31 +545,31 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { // The pod must be ready for at least 10s before we delete the old pod ds.Spec.MinReadySeconds = 10 - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - waitForHistoryCreated(c, ns, label, 1) - cur := curHistory(listDaemonHistories(c, ns, label), ds) + waitForHistoryCreated(ctx, c, ns, label, 1) + cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds) hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] framework.ExpectEqual(cur.Revision, int64(1)) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) + checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash) newVersion := "2" ginkgo.By("Update daemon pods environment var") patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"VERSION","value":"%s"}]}]}}}}`, ds.Spec.Template.Spec.Containers[0].Name, newVersion) - ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err) // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. // Get the number of nodes, and set the timeout appropriately. 
- nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) nodeCount := len(nodes.Items) retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second @@ -577,8 +577,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Check that daemon pods surge and invariants are preserved during that rollout") ageOfOldPod := make(map[string]time.Time) deliberatelyDeletedPods := sets.NewString() - err = wait.PollImmediate(dsRetryPeriod, retryTimeout, func() (bool, error) { - podList, err := c.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{}) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, func(ctx context.Context) (bool, error) { + podList, err := c.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -749,7 +749,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { } // Make sure every daemon pod on the node has been updated - nodeNames := e2edaemonset.SchedulableNodes(c, ds) + nodeNames := e2edaemonset.SchedulableNodes(ctx, c, ds) for _, node := range nodeNames { switch { case @@ -782,7 +782,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { if pod := randomPod(pods, func(pod *v1.Pod) bool { return pod.DeletionTimestamp == nil }); pod != nil { - if err := c.CoreV1().Pods(ds.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ds.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil { framework.Logf("Failed to delete pod %s early: %v", pod.Name, err) } else { framework.Logf("Deleted pod %s prematurely", pod.Name) @@ -800,17 +800,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods are still running on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - waitForHistoryCreated(c, ns, label, 2) - cur = curHistory(listDaemonHistories(c, ns, label), ds) + waitForHistoryCreated(ctx, c, ns, label, 2) + cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds) hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] framework.ExpectEqual(cur.Revision, int64(2)) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) + checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash) }) /* @@ -829,26 +829,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { one := int64(1) ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) - testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{}) + testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, 
dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) framework.ExpectNoError(err, "error waiting for daemon pod to start") - err = e2edaemonset.CheckDaemonStatus(f, dsName) + err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) ginkgo.By("listing all DaemonSets") - dsList, err := cs.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Daemon Sets") framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found") ginkgo.By("DeleteCollection of the DaemonSets") - err = dsClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: labelSelector}) + err = dsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to delete DaemonSets") ginkgo.By("Verify that ReplicaSets have been deleted") - dsList, err = c.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + dsList, err = c.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list DaemonSets") framework.ExpectEqual(len(dsList.Items), 0, "filtered list should have no daemonset") }) @@ -869,26 +869,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector - return dsClient.Watch(context.TODO(), options) + return dsClient.Watch(ctx, options) }, } - dsList, err := cs.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Daemon Sets") ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) - testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{}) + testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) + err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) framework.ExpectNoError(err, "error waiting for daemon pod to start") - err = e2edaemonset.CheckDaemonStatus(f, dsName) + err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) ginkgo.By("Getting /status") dsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"} - dsStatusUnstructured, err := f.DynamicClient.Resource(dsResource).Namespace(ns).Get(context.TODO(), dsName, metav1.GetOptions{}, "status") + dsStatusUnstructured, err := f.DynamicClient.Resource(dsResource).Namespace(ns).Get(ctx, dsName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "Failed to fetch the status of daemon set %s in namespace %s", dsName, ns) dsStatusBytes, err := json.Marshal(dsStatusUnstructured) framework.ExpectNoError(err, "Failed to marshal unstructured response. 
%v", err) @@ -902,7 +902,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { var statusToUpdate, updatedStatus *appsv1.DaemonSet err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - statusToUpdate, err = dsClient.Get(context.TODO(), dsName, metav1.GetOptions{}) + statusToUpdate, err = dsClient.Get(ctx, dsName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to retrieve daemon set %s", dsName) statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DaemonSetCondition{ @@ -912,16 +912,16 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Message: "Set from e2e test", }) - updatedStatus, err = dsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err = dsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "Failed to update status. %v", err) framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) ginkgo.By("watching for the daemon set status to be updated") - ctx, cancel := context.WithTimeout(ctx, dsRetryTimeout) + ctxUntil, cancel := context.WithTimeout(ctx, dsRetryTimeout) defer cancel() - _, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if ds, ok := event.Object.(*appsv1.DaemonSet); ok { found := ds.ObjectMeta.Name == testDaemonset.ObjectMeta.Name && ds.ObjectMeta.Namespace == testDaemonset.ObjectMeta.Namespace && @@ -961,13 +961,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { payload, err := json.Marshal(daemonSetStatusPatch) framework.ExpectNoError(err, "Failed to marshal JSON. %v", err) - _, err = dsClient.Patch(context.TODO(), dsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") + _, err = dsClient.Patch(ctx, dsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Failed to patch daemon set status", err) ginkgo.By("watching for the daemon set status to be patched") - ctx, cancel = context.WithTimeout(context.Background(), dsRetryTimeout) + ctxUntil, cancel = context.WithTimeout(ctx, dsRetryTimeout) defer cancel() - _, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if ds, ok := event.Object.(*appsv1.DaemonSet); ok { found := ds.ObjectMeta.Name == testDaemonset.ObjectMeta.Name && ds.ObjectMeta.Namespace == testDaemonset.ObjectMeta.Namespace && @@ -1021,10 +1021,10 @@ func newDaemonSetWithLabel(dsName, image string, label map[string]string) *appsv return e2edaemonset.NewDaemonSet(dsName, image, label, nil, nil, []v1.ContainerPort{{ContainerPort: 9376}}) } -func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList { +func listDaemonPods(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *v1.PodList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.CoreV1().Pods(ns).List(context.TODO(), options) + podList, err := c.CoreV1().Pods(ns).List(ctx, options) framework.ExpectNoError(err) gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0)) return podList @@ -1043,13 +1043,13 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m return daemonSetLabels, otherLabels } 
-func clearDaemonSetNodeLabels(c clientset.Interface) error { - nodeList, err := e2enode.GetReadySchedulableNodes(c) +func clearDaemonSetNodeLabels(ctx context.Context, c clientset.Interface) error { + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c) if err != nil { return err } for _, node := range nodeList.Items { - _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{}) + _, err := setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{}) if err != nil { return err } @@ -1058,7 +1058,7 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error { } // patchNamespaceAnnotations sets node selectors related annotations on tests namespaces to empty -func patchNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) { +func patchNamespaceAnnotations(ctx context.Context, c clientset.Interface, nsName string) (*v1.Namespace, error) { nsClient := c.CoreV1().Namespaces() annotations := make(map[string]string) @@ -1074,15 +1074,15 @@ func patchNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namesp return nil, err } - return nsClient.Patch(context.TODO(), nsName, types.StrategicMergePatchType, nsPatch, metav1.PatchOptions{}) + return nsClient.Patch(ctx, nsName, types.StrategicMergePatchType, nsPatch, metav1.PatchOptions{}) } -func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) { +func setDaemonSetNodeLabels(ctx context.Context, c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) { nodeClient := c.CoreV1().Nodes() var newNode *v1.Node var newLabels map[string]string - err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) { - node, err := nodeClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, func(ctx context.Context) (bool, error) { + node, err := nodeClient.Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1097,7 +1097,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s for k, v := range labels { node.Labels[k] = v } - newNode, err = nodeClient.Update(context.TODO(), node, metav1.UpdateOptions{}) + newNode, err = nodeClient.Update(ctx, node, metav1.UpdateOptions{}) if err == nil { newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels) return true, err @@ -1117,15 +1117,15 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s return newNode, nil } -func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) { - return func() (bool, error) { - return e2edaemonset.CheckRunningOnAllNodes(f, ds) +func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { + return e2edaemonset.CheckRunningOnAllNodes(ctx, f, ds) } } -func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) { - return func() (bool, error) { - pods := listDaemonPods(c, ns, label) +func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { + pods := listDaemonPods(ctx, c, ns, label) for _, pod := range pods.Items { if pod.Spec.Containers[0].Image == newImage { return true, nil @@ -1135,13 +1135,13 @@ func checkAtLeastOneNewPod(c 
clientset.Interface, ns string, label map[string]st } } -func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) { +func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func(ctx context.Context) (bool, error) { return e2edaemonset.CheckDaemonPodOnNodes(f, ds, make([]string, 0)) } -func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func() (bool, error) { - return func() (bool, error) { - podList, err := c.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{}) +func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { + podList, err := c.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -1172,7 +1172,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.Daemo return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable) } // Make sure every daemon pod on the node has been updated - nodeNames := e2edaemonset.SchedulableNodes(c, ds) + nodeNames := e2edaemonset.SchedulableNodes(ctx, c, ds) for _, node := range nodeNames { if nodesToUpdatedPodCount[node] == 0 { return false, nil @@ -1196,11 +1196,11 @@ func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) { } } -func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]string, numHistory int) { - listHistoryFn := func() (bool, error) { +func waitForHistoryCreated(ctx context.Context, c clientset.Interface, ns string, label map[string]string, numHistory int) { + listHistoryFn := func(ctx context.Context) (bool, error) { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options) + historyList, err := c.AppsV1().ControllerRevisions(ns).List(ctx, options) if err != nil { return false, err } @@ -1210,14 +1210,14 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory) return false, nil } - err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn) + err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, listHistoryFn) framework.ExpectNoError(err, "error waiting for controllerrevisions to be created") } -func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList { +func listDaemonHistories(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options) + historyList, err := c.AppsV1().ControllerRevisions(ns).List(ctx, options) framework.ExpectNoError(err) gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0)) return historyList @@ -1242,9 +1242,9 @@ func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet return curHistory } -func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) { - return func() (bool, error) { - if _, err := 
c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil { +func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { + if _, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}); err != nil { if apierrors.IsNotFound(err) { return true, nil } diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 3fdeb88ddf0..a5b06f3bcbb 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -81,8 +81,8 @@ var _ = SIGDescribe("Deployment", func() { var c clientset.Interface var dc dynamic.Interface - ginkgo.AfterEach(func() { - failureTrap(c, ns) + ginkgo.AfterEach(func(ctx context.Context) { + failureTrap(ctx, c, ns) }) f := framework.NewDefaultFramework("deployment") @@ -95,7 +95,7 @@ var _ = SIGDescribe("Deployment", func() { }) ginkgo.It("deployment reaping should cascade to its replica sets and pods", func(ctx context.Context) { - testDeleteDeployment(f) + testDeleteDeployment(ctx, f) }) /* Release: v1.12 @@ -103,7 +103,7 @@ var _ = SIGDescribe("Deployment", func() { Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy. */ framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func(ctx context.Context) { - testRollingUpdateDeployment(f) + testRollingUpdateDeployment(ctx, f) }) /* Release: v1.12 @@ -111,7 +111,7 @@ var _ = SIGDescribe("Deployment", func() { Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy. */ framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func(ctx context.Context) { - testRecreateDeployment(f) + testRecreateDeployment(ctx, f) }) /* Release: v1.12 @@ -120,7 +120,7 @@ var _ = SIGDescribe("Deployment", func() { the Deployment's `.spec.revisionHistoryLimit`. */ framework.ConformanceIt("deployment should delete old replica sets", func(ctx context.Context) { - testDeploymentCleanUpPolicy(f) + testDeploymentCleanUpPolicy(ctx, f) }) /* Release: v1.12 @@ -130,13 +130,13 @@ var _ = SIGDescribe("Deployment", func() { before the rollout finishes. */ framework.ConformanceIt("deployment should support rollover", func(ctx context.Context) { - testRolloverDeployment(f) + testRolloverDeployment(ctx, f) }) ginkgo.It("iterative rollouts should eventually progress", func(ctx context.Context) { - testIterativeDeployments(f) + testIterativeDeployments(ctx, f) }) ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func(ctx context.Context) { - testDeploymentsControllerRef(f) + testDeploymentsControllerRef(ctx, f) }) /* @@ -148,7 +148,7 @@ var _ = SIGDescribe("Deployment", func() { a scale subresource. */ framework.ConformanceIt("Deployment should have a working scale subresource", func(ctx context.Context) { - testDeploymentSubresources(f) + testDeploymentSubresources(ctx, f) }) /* Release: v1.12 @@ -158,15 +158,15 @@ var _ = SIGDescribe("Deployment", func() { when a Deployment is scaled. 
*/ framework.ConformanceIt("deployment should support proportional scaling", func(ctx context.Context) { - testProportionalScalingDeployment(f) + testProportionalScalingDeployment(ctx, f) }) ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke") e2eskipper.SkipIfIPv6("aws") - nodes, err := e2enode.GetReadySchedulableNodes(c) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) e2eskipper.SkipUnlessAtLeast(len(nodes.Items), 3, "load-balancer test requires at least 3 schedulable nodes") - testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f) + testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(ctx, f) }) // TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues // See https://github.com/kubernetes/kubernetes/issues/29229 @@ -198,10 +198,10 @@ var _ = SIGDescribe("Deployment", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = testDeploymentLabelsFlat - return f.ClientSet.AppsV1().Deployments(testNamespaceName).Watch(context.TODO(), options) + return f.ClientSet.AppsV1().Deployments(testNamespaceName).Watch(ctx, options) }, } - deploymentsList, err := f.ClientSet.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) + deploymentsList, err := f.ClientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) framework.ExpectNoError(err, "failed to list Deployments") ginkgo.By("creating a Deployment") @@ -211,13 +211,13 @@ var _ = SIGDescribe("Deployment", func() { testDeployment.ObjectMeta.Labels = map[string]string{"test-deployment-static": "true"} testDeployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &one - _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), testDeployment, metav1.CreateOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(ctx, testDeployment, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName) ginkgo.By("waiting for Deployment to be created") - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Added: if deployment, ok := event.Object.(*appsv1.Deployment); ok { @@ -233,9 +233,9 @@ var _ = SIGDescribe("Deployment", func() { framework.ExpectNoError(err, "failed to see %v event", watch.Added) ginkgo.By("waiting for all Replicas to be Ready") - ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) + ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if deployment, ok := event.Object.(*appsv1.Deployment); ok { found := deployment.ObjectMeta.Name == testDeployment.Name && deployment.ObjectMeta.Labels["test-deployment-static"] == "true" && @@ -269,11 +269,11 @@ 
var _ = SIGDescribe("Deployment", func() { }, }) framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch") - _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Patch(context.TODO(), testDeploymentName, types.StrategicMergePatchType, []byte(deploymentPatch), metav1.PatchOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Patch(ctx, testDeploymentName, types.StrategicMergePatchType, []byte(deploymentPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch Deployment") - ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Modified: if deployment, ok := event.Object.(*appsv1.Deployment); ok { @@ -292,9 +292,9 @@ var _ = SIGDescribe("Deployment", func() { framework.ExpectNoError(err, "failed to see %v event", watch.Modified) ginkgo.By("waiting for Replicas to scale") - ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) + ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if deployment, ok := event.Object.(*appsv1.Deployment); ok { found := deployment.ObjectMeta.Name == testDeployment.Name && deployment.ObjectMeta.Labels["test-deployment-static"] == "true" && @@ -313,7 +313,7 @@ var _ = SIGDescribe("Deployment", func() { framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentMinimumReplicas) ginkgo.By("listing Deployments") - deploymentsList, err = f.ClientSet.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) + deploymentsList, err = f.ClientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) framework.ExpectNoError(err, "failed to list Deployments") foundDeployment := false for _, deploymentItem := range deploymentsList.Items { @@ -339,11 +339,11 @@ var _ = SIGDescribe("Deployment", func() { Object: testDeploymentUpdateUnstructuredMap, } // currently this hasn't been able to hit the endpoint replaceAppsV1NamespacedDeploymentStatus - _, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Update(context.TODO(), &testDeploymentUpdateUnstructured, metav1.UpdateOptions{}) //, "status") + _, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Update(ctx, &testDeploymentUpdateUnstructured, metav1.UpdateOptions{}) //, "status") framework.ExpectNoError(err, "failed to update the DeploymentStatus") - ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Modified: if deployment, ok := event.Object.(*appsv1.Deployment); ok { @@ -363,7 +363,7 @@ 
var _ = SIGDescribe("Deployment", func() { framework.ExpectNoError(err, "failed to see %v event", watch.Modified) ginkgo.By("fetching the DeploymentStatus") - deploymentGetUnstructured, err := dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(context.TODO(), testDeploymentName, metav1.GetOptions{}, "status") + deploymentGetUnstructured, err := dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(ctx, testDeploymentName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "failed to fetch the Deployment") deploymentGet := appsv1.Deployment{} err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet) @@ -371,9 +371,9 @@ var _ = SIGDescribe("Deployment", func() { framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image") framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels") - ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) + ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if deployment, ok := event.Object.(*appsv1.Deployment); ok { found := deployment.ObjectMeta.Name == testDeployment.Name && deployment.ObjectMeta.Labels["test-deployment-static"] == "true" && @@ -399,10 +399,14 @@ var _ = SIGDescribe("Deployment", func() { }, }) framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch") - dc.Resource(deploymentResource).Namespace(testNamespaceName).Patch(context.TODO(), testDeploymentName, types.StrategicMergePatchType, []byte(deploymentStatusPatch), metav1.PatchOptions{}, "status") - ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + // This test is broken, patching fails with: + // Deployment.apps "test-deployment" is invalid: status.availableReplicas: Invalid value: 2: cannot be greater than readyReplicas + // https://github.com/kubernetes/kubernetes/issues/113259 + _, _ = dc.Resource(deploymentResource).Namespace(testNamespaceName).Patch(ctx, testDeploymentName, types.StrategicMergePatchType, []byte(deploymentStatusPatch), metav1.PatchOptions{}, "status") + + ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Modified: if deployment, ok := event.Object.(*appsv1.Deployment); ok { @@ -418,16 +422,16 @@ var _ = SIGDescribe("Deployment", func() { framework.ExpectNoError(err, "failed to see %v event", watch.Modified) ginkgo.By("fetching the DeploymentStatus") - deploymentGetUnstructured, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(context.TODO(), testDeploymentName, metav1.GetOptions{}, "status") + deploymentGetUnstructured, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(ctx, testDeploymentName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "failed to fetch the DeploymentStatus") deploymentGet = appsv1.Deployment{} err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet) 
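
// Illustrative sketch (not from this patch; the helper name is hypothetical and it relies on
// imports this file already has: "context", "time", "k8s.io/client-go/tools/cache" and
// watchtools "k8s.io/client-go/tools/watch"). The hunks above give each watch its own child
// context derived from the Ginkgo spec context, under a new name (ctxUntil) so the spec's ctx
// is not shadowed: the wait then ends on its own timeout or as soon as the test is aborted,
// whichever comes first.
func waitForDeploymentEvent(ctx context.Context, rv string, w *cache.ListWatch, cond watchtools.ConditionFunc) error {
    ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second) // bounded by the timeout and by test abort
    defer cancel()
    _, err := watchtools.Until(ctxUntil, rv, w, cond)
    return err
}
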
framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment") framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image") framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels") - ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) + ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if deployment, ok := event.Object.(*appsv1.Deployment); ok { found := deployment.ObjectMeta.Name == testDeployment.Name && deployment.ObjectMeta.Labels["test-deployment-static"] == "true" && @@ -445,12 +449,12 @@ var _ = SIGDescribe("Deployment", func() { framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas) ginkgo.By("deleting the Deployment") - err = f.ClientSet.AppsV1().Deployments(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) + err = f.ClientSet.AppsV1().Deployments(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) framework.ExpectNoError(err, "failed to delete Deployment via collection") - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Minute) + ctxUntil, cancel = context.WithTimeout(ctx, 1*time.Minute) defer cancel() - _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: if deployment, ok := event.Object.(*appsv1.Deployment); ok { @@ -484,10 +488,10 @@ var _ = SIGDescribe("Deployment", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector - return dClient.Watch(context.TODO(), options) + return dClient.Watch(ctx, options) }, } - dList, err := c.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + dList, err := c.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Deployments") ginkgo.By("creating a Deployment") @@ -496,7 +500,7 @@ var _ = SIGDescribe("Deployment", func() { replicas := int32(1) framework.Logf("Creating simple deployment %s", dName) d := e2edeployment.NewDeployment(dName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 @@ -506,12 +510,12 @@ var _ = SIGDescribe("Deployment", func() { err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) - testDeployment, err := dClient.Get(context.TODO(), dName, metav1.GetOptions{}) + testDeployment, err := dClient.Get(ctx, dName, 
metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Getting /status") dResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} - dStatusUnstructured, err := f.DynamicClient.Resource(dResource).Namespace(ns).Get(context.TODO(), dName, metav1.GetOptions{}, "status") + dStatusUnstructured, err := f.DynamicClient.Resource(dResource).Namespace(ns).Get(ctx, dName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "Failed to fetch the status of deployment %s in namespace %s", dName, ns) dStatusBytes, err := json.Marshal(dStatusUnstructured) framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err) @@ -525,7 +529,7 @@ var _ = SIGDescribe("Deployment", func() { var statusToUpdate, updatedStatus *appsv1.Deployment err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - statusToUpdate, err = dClient.Get(context.TODO(), dName, metav1.GetOptions{}) + statusToUpdate, err = dClient.Get(ctx, dName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to retrieve deployment %s", dName) statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DeploymentCondition{ @@ -535,17 +539,17 @@ var _ = SIGDescribe("Deployment", func() { Message: "Set from e2e test", }) - updatedStatus, err = dClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err = dClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "Failed to update status. %v", err) framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) ginkgo.By("watching for the Deployment status to be updated") - ctx, cancel := context.WithTimeout(ctx, dRetryTimeout) + ctxUntil, cancel := context.WithTimeout(ctx, dRetryTimeout) defer cancel() - _, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { if d, ok := event.Object.(*appsv1.Deployment); ok { found := d.ObjectMeta.Name == testDeployment.ObjectMeta.Name && d.ObjectMeta.Namespace == testDeployment.ObjectMeta.Namespace && @@ -576,15 +580,15 @@ var _ = SIGDescribe("Deployment", func() { payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`) framework.Logf("Patch payload: %v", string(payload)) - patchedDeployment, err := dClient.Patch(context.TODO(), dName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") + patchedDeployment, err := dClient.Patch(ctx, dName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Failed to patch status. 
%v", err) framework.Logf("Patched status conditions: %#v", patchedDeployment.Status.Conditions) ginkgo.By("watching for the Deployment status to be patched") - ctx, cancel = context.WithTimeout(context.Background(), dRetryTimeout) + ctxUntil, cancel = context.WithTimeout(ctx, dRetryTimeout) defer cancel() - _, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { if e, ok := event.Object.(*appsv1.Deployment); ok { found := e.ObjectMeta.Name == testDeployment.ObjectMeta.Name && @@ -611,8 +615,8 @@ var _ = SIGDescribe("Deployment", func() { }) }) -func failureTrap(c clientset.Interface, ns string) { - deployments, err := c.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) +func failureTrap(ctx context.Context, c clientset.Interface, ns string) { + deployments, err := c.AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("Could not list Deployments in namespace %q: %v", ns, err) return @@ -638,7 +642,7 @@ func failureTrap(c clientset.Interface, ns string) { return } framework.Logf("Log out all the ReplicaSets if there is no deployment created") - rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) + rss, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err) return @@ -650,7 +654,7 @@ func failureTrap(c clientset.Interface, ns string) { framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err) } options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.CoreV1().Pods(rs.Namespace).List(context.TODO(), options) + podList, err := c.CoreV1().Pods(rs.Namespace).List(ctx, options) if err != nil { framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err) continue @@ -666,29 +670,29 @@ func intOrStrP(num int) *intstr.IntOrString { return &intstr } -func stopDeployment(c clientset.Interface, ns, deploymentName string) { - deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) +func stopDeployment(ctx context.Context, c clientset.Interface, ns, deploymentName string) { + deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.Logf("Deleting deployment %s", deploymentName) - err = e2eresource.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name) + err = e2eresource.DeleteResourceAndWaitForGC(ctx, c, appsinternal.Kind("Deployment"), ns, deployment.Name) framework.ExpectNoError(err) framework.Logf("Ensuring deployment %s was deleted", deploymentName) - _, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) + _, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{}) framework.ExpectError(err) framework.ExpectEqual(apierrors.IsNotFound(err), true) framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err) options := metav1.ListOptions{LabelSelector: selector.String()} - rss, err := 
c.AppsV1().ReplicaSets(ns).List(context.TODO(), options) + rss, err := c.AppsV1().ReplicaSets(ns).List(ctx, options) framework.ExpectNoError(err) gomega.Expect(rss.Items).Should(gomega.HaveLen(0)) framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) var pods *v1.PodList if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { - pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options) + pods, err = c.CoreV1().Pods(ns).List(ctx, options) if err != nil { return false, err } @@ -702,7 +706,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) { } } -func testDeleteDeployment(f *framework.Framework) { +func testDeleteDeployment(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -712,7 +716,7 @@ func testDeleteDeployment(f *framework.Framework) { framework.Logf("Creating simple deployment %s", deploymentName) d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} - deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 @@ -722,15 +726,15 @@ func testDeleteDeployment(f *framework.Framework) { err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) - deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) newRS, err := testutil.GetNewReplicaSet(deployment, c) framework.ExpectNoError(err) framework.ExpectNotEqual(newRS, nilRs) - stopDeployment(c, ns, deploymentName) + stopDeployment(ctx, c, ns, deploymentName) } -func testRollingUpdateDeployment(f *framework.Framework) { +func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet // Create webserver pods. @@ -748,17 +752,17 @@ func testRollingUpdateDeployment(f *framework.Framework) { rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) rs.Annotations = annotations framework.Logf("Creating replica set %q (going to be adopted)", rs.Name) - _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) + _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the required pods have come up. - err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) + err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err) // Create a deployment to delete webserver pods and instead bring up agnhost pods. 
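
// A sketch for illustration only (not from this patch; hypothetical helper, using imports
// already present here: "context", "time", metav1, "k8s.io/apimachinery/pkg/util/wait" and the
// clientset). This is the converted shape of a poll loop: wait.PollImmediate becomes
// wait.PollImmediateWithContext, the condition takes a context, and every API call inside it
// uses that context, so an aborted test stops the wait instead of letting it run on.
func waitForNoPods(ctx context.Context, c clientset.Interface, ns string, opts metav1.ListOptions) error {
    return wait.PollImmediateWithContext(ctx, time.Second, 5*time.Minute,
        func(ctx context.Context) (bool, error) {
            pods, err := c.CoreV1().Pods(ns).List(ctx, opts)
            if err != nil {
                return false, err // includes "context canceled" once the test is aborted
            }
            return len(pods.Items) == 0, nil
        })
}
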
deploymentName := "test-rolling-update-deployment" framework.Logf("Creating deployment %q", deploymentName) d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 3546343826724305833. @@ -772,14 +776,14 @@ func testRollingUpdateDeployment(f *framework.Framework) { // There should be 1 old RS (webserver-controller, which is adopted) framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name) - deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) _, allOldRSs, err := testutil.GetOldReplicaSets(deployment, c) framework.ExpectNoError(err) framework.ExpectEqual(len(allOldRSs), 1) } -func testRecreateDeployment(f *framework.Framework) { +func testRecreateDeployment(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -787,7 +791,7 @@ func testRecreateDeployment(f *framework.Framework) { deploymentName := "test-recreate-deployment" framework.Logf("Creating deployment %q", deploymentName) d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType) - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 @@ -808,12 +812,12 @@ func testRecreateDeployment(f *framework.Framework) { framework.ExpectNoError(err) framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName) - err = watchRecreateDeployment(c, deployment) + err = watchRecreateDeployment(ctx, c, deployment) framework.ExpectNoError(err) } // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy -func testDeploymentCleanUpPolicy(f *framework.Framework) { +func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet // Create webserver pods. @@ -825,18 +829,18 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { rsName := "test-cleanup-controller" replicas := int32(1) revisionHistoryLimit := utilpointer.Int32Ptr(0) - _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{}) + _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the required pods have come up. - err = e2epod.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas) + err = e2epod.VerifyPodsRunning(ctx, c, ns, "cleanup-pod", false, replicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) // Create a deployment to delete webserver pods and instead bring up agnhost pods. 
deploymentName := "test-cleanup-deployment" framework.Logf("Creating deployment %s", deploymentName) - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) framework.ExpectNoError(err, "Failed to query for pods: %v", err) options := metav1.ListOptions{ @@ -844,7 +848,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { } stopCh := make(chan struct{}) defer close(stopCh) - w, err := c.CoreV1().Pods(ns).Watch(context.TODO(), options) + w, err := c.CoreV1().Pods(ns).Watch(ctx, options) framework.ExpectNoError(err) go func() { defer ginkgo.GinkgoRecover() @@ -875,17 +879,17 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { }() d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) d.Spec.RevisionHistoryLimit = revisionHistoryLimit - _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + _, err = c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName)) - err = waitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit)) + err = waitForDeploymentOldRSsNum(ctx, c, ns, deploymentName, int(*revisionHistoryLimit)) framework.ExpectNoError(err) } // testRolloverDeployment tests that deployment supports rollover. // i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes. -func testRolloverDeployment(f *framework.Framework) { +func testRolloverDeployment(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet podName := "rollover-pod" @@ -897,15 +901,15 @@ func testRolloverDeployment(f *framework.Framework) { rsName := "test-rollover-controller" rsReplicas := int32(1) - _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{}) + _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the required pods have come up. - err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas) + err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, false, rsReplicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) // Wait for replica set to become ready before adopting it. framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName) - err = e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName) + err = e2ereplicaset.WaitForReadyReplicaSet(ctx, c, ns, rsName) framework.ExpectNoError(err) // Create a deployment to delete webserver pods and instead bring up redis-slave pods. @@ -921,11 +925,11 @@ func testRolloverDeployment(f *framework.Framework) { MaxSurge: intOrStrP(1), } newDeployment.Spec.MinReadySeconds = int32(10) - _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), newDeployment, metav1.CreateOptions{}) + _, err = c.AppsV1().Deployments(ns).Create(ctx, newDeployment, metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the pods were scaled up and down as expected. 
- deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.Logf("Make sure deployment %q performs scaling operations", deploymentName) // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1 @@ -937,7 +941,7 @@ func testRolloverDeployment(f *framework.Framework) { framework.ExpectNoError(err) framework.Logf("Ensure that both replica sets have 1 created replica") - oldRS, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) + oldRS, err := c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{}) framework.ExpectNoError(err) ensureReplicas(oldRS, int32(1)) newRS, err := testutil.GetNewReplicaSet(deployment, c) @@ -968,11 +972,11 @@ func testRolloverDeployment(f *framework.Framework) { framework.ExpectNoError(err) framework.Logf("Ensure that both old replica sets have no replicas") - oldRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) + oldRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{}) framework.ExpectNoError(err) ensureReplicas(oldRS, int32(0)) // Not really the new replica set anymore but we GET by name so that's fine. - newRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), newRS.Name, metav1.GetOptions{}) + newRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, newRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ensureReplicas(newRS, int32(0)) } @@ -995,7 +999,7 @@ func randomScale(d *appsv1.Deployment, i int) { } } -func testIterativeDeployments(f *framework.Framework) { +func testIterativeDeployments(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -1012,7 +1016,7 @@ func testIterativeDeployments(f *framework.Framework) { d.Spec.RevisionHistoryLimit = &two d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero framework.Logf("Creating deployment %q", deploymentName) - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) iterations := 20 @@ -1075,7 +1079,7 @@ func testIterativeDeployments(f *framework.Framework) { selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err) opts := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.CoreV1().Pods(ns).List(context.TODO(), opts) + podList, err := c.CoreV1().Pods(ns).List(ctx, opts) framework.ExpectNoError(err) if len(podList.Items) == 0 { framework.Logf("%02d: no deployment pods to delete", i) @@ -1087,7 +1091,7 @@ func testIterativeDeployments(f *framework.Framework) { } name := podList.Items[p].Name framework.Logf("%02d: deleting deployment pod %q", i, name) - err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } @@ -1096,7 +1100,7 @@ func testIterativeDeployments(f *framework.Framework) { } // unpause the deployment if we end up pausing it - deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if 
deployment.Spec.Paused { framework.Logf("Resuming deployment %q", deployment.Name) @@ -1119,7 +1123,7 @@ func testIterativeDeployments(f *framework.Framework) { framework.ExpectNoError(err) } -func testDeploymentsControllerRef(f *framework.Framework) { +func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -1128,44 +1132,44 @@ func testDeploymentsControllerRef(f *framework.Framework) { podLabels := map[string]string{"name": WebserverImageName} replicas := int32(1) d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName) - rsList := listDeploymentReplicaSets(c, ns, podLabels) + rsList := listDeploymentReplicaSets(ctx, c, ns, podLabels) framework.ExpectEqual(len(rsList.Items), 1) framework.Logf("Obtaining the ReplicaSet's UID") orphanedRSUID := rsList.Items[0].UID framework.Logf("Checking the ReplicaSet has the right controllerRef") - err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) + err = checkDeploymentReplicaSetsControllerRef(ctx, c, ns, deploy.UID, podLabels) framework.ExpectNoError(err) framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName) - err = orphanDeploymentReplicaSets(c, deploy) + err = orphanDeploymentReplicaSets(ctx, c, deploy) framework.ExpectNoError(err) ginkgo.By("Wait for the ReplicaSet to be orphaned") - err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels)) + err = wait.PollWithContext(ctx, dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels)) framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned") deploymentName = "test-adopt-deployment" framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) d = e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deploy, err = c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) framework.Logf("Waiting for the ReplicaSet to have the right controllerRef") - err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) + err = checkDeploymentReplicaSetsControllerRef(ctx, c, ns, deploy.UID, podLabels) framework.ExpectNoError(err) framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName) - rsList = listDeploymentReplicaSets(c, ns, podLabels) + rsList = listDeploymentReplicaSets(ctx, c, ns, podLabels) framework.ExpectEqual(len(rsList.Items), 1) framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet") @@ -1175,7 +1179,7 @@ func testDeploymentsControllerRef(f *framework.Framework) { // testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle // of a rollout 
(either in progress or paused), then the Deployment will balance additional replicas // in existing active ReplicaSets (ReplicaSets with more than 0 replica) in order to mitigate risk. -func testProportionalScalingDeployment(f *framework.Framework) { +func testProportionalScalingDeployment(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -1190,7 +1194,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) framework.Logf("Creating deployment %q", deploymentName) - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Waiting for observed generation %d", deployment.Generation) @@ -1199,7 +1203,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Verify that the required pods have come up. framework.Logf("Waiting for all required pods to come up") - err = e2epod.VerifyPodsRunning(c, ns, WebserverImageName, false, *(deployment.Spec.Replicas)) + err = e2epod.VerifyPodsRunning(ctx, c, ns, WebserverImageName, false, *(deployment.Spec.Replicas)) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) framework.Logf("Waiting for deployment %q to complete", deployment.Name) @@ -1228,19 +1232,19 @@ func testProportionalScalingDeployment(f *framework.Framework) { // First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas. minAvailableReplicas := replicas - int32(maxUnavailable) framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas) - err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas) + err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, c, firstRS, minAvailableReplicas) framework.ExpectNoError(err) // First rollout's replicaset should have .spec.replicas = 8 too. framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas) - err = waitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas) + err = waitForReplicaSetTargetSpecReplicas(ctx, c, firstRS, minAvailableReplicas) framework.ExpectNoError(err) // The desired replicas wait makes sure that the RS controller has created expected number of pods. framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) - firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{}) + firstRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, firstRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - err = waitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS) + err = waitForReplicaSetDesiredReplicas(ctx, c.AppsV1(), firstRS) framework.ExpectNoError(err) // Checking state of second rollout's replicaset. @@ -1257,14 +1261,14 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas. 
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas) - err = waitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas) + err = waitForReplicaSetTargetSpecReplicas(ctx, c, secondRS, newReplicas) framework.ExpectNoError(err) // The desired replicas wait makes sure that the RS controller has created expected number of pods. framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) - secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{}) + secondRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, secondRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - err = waitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS) + err = waitForReplicaSetDesiredReplicas(ctx, c.AppsV1(), secondRS) framework.ExpectNoError(err) // Check the deployment's minimum availability. @@ -1283,26 +1287,26 @@ func testProportionalScalingDeployment(f *framework.Framework) { framework.ExpectNoError(err) framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) - firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{}) + firstRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, firstRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{}) + secondRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, secondRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas. // Note that 12 comes from rounding (30-10)*(8/13) to nearest integer. framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20") - err = waitForReplicaSetTargetSpecReplicas(c, firstRS, 20) + err = waitForReplicaSetTargetSpecReplicas(ctx, c, firstRS, 20) framework.ExpectNoError(err) // Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas. // Note that 8 comes from rounding (30-10)*(5/13) to nearest integer. 
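
// A sketch for illustration only (not from this patch; hypothetical helper, using the
// "context", "time", metav1, wait and clientset imports already in this file). Condition
// factories such as waitDeploymentReplicaSetsOrphaned below now return a
// wait.ConditionWithContextFunc instead of a plain func() (bool, error), so that
// wait.PollWithContext / wait.PollImmediateWithContext can hand their context to every API
// call the probe makes.
func replicaSetHasSpecReplicas(c clientset.Interface, ns, name string, want int32) wait.ConditionWithContextFunc {
    return func(ctx context.Context) (bool, error) {
        rs, err := c.AppsV1().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return rs.Spec.Replicas != nil && *rs.Spec.Replicas == want, nil
    }
}

// Example use: err := wait.PollWithContext(ctx, 2*time.Second, time.Minute,
//     replicaSetHasSpecReplicas(c, ns, rsName, 13))
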
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13") - err = waitForReplicaSetTargetSpecReplicas(c, secondRS, 13) + err = waitForReplicaSetTargetSpecReplicas(ctx, c, secondRS, 13) framework.ExpectNoError(err) } -func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error { - rsList := listDeploymentReplicaSets(c, ns, label) +func checkDeploymentReplicaSetsControllerRef(ctx context.Context, c clientset.Interface, ns string, uid types.UID, label map[string]string) error { + rsList := listDeploymentReplicaSets(ctx, c, ns, label) for _, rs := range rsList.Items { // This rs is adopted only when its controller ref is update if controllerRef := metav1.GetControllerOf(&rs); controllerRef == nil || controllerRef.UID != uid { @@ -1312,9 +1316,9 @@ func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, u return nil } -func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) { - return func() (bool, error) { - rsList := listDeploymentReplicaSets(c, ns, label) +func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { + rsList := listDeploymentReplicaSets(ctx, c, ns, label) for _, rs := range rsList.Items { // This rs is orphaned only when controller ref is cleared if controllerRef := metav1.GetControllerOf(&rs); controllerRef != nil { @@ -1325,23 +1329,23 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m } } -func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList { +func listDeploymentReplicaSets(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options) + rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, options) framework.ExpectNoError(err) gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0)) return rsList } -func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error { +func orphanDeploymentReplicaSets(ctx context.Context, c clientset.Interface, d *appsv1.Deployment) error { trueVar := true deleteOptions := metav1.DeleteOptions{OrphanDependents: &trueVar} deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID)) - return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions) + return c.AppsV1().Deployments(d.Namespace).Delete(ctx, d.Name, deleteOptions) } -func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framework) { +func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -1372,7 +1376,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew MaxSurge: intOrStrP(1), MaxUnavailable: intOrStrP(0), } - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2edeployment.WaitForDeploymentComplete(c, deployment) framework.ExpectNoError(err) @@ -1380,7 +1384,7 @@ func 
testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns) jig := e2eservice.NewTestJig(c, ns, name) jig.Labels = podLabels - service, err := jig.CreateLoadBalancerService(e2eservice.GetServiceLoadBalancerCreationTimeout(c), func(svc *v1.Service) { + service, err := jig.CreateLoadBalancerService(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, c), func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal }) framework.ExpectNoError(err) @@ -1393,9 +1397,9 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew if framework.ProviderIs("aws") { timeout = e2eservice.LoadBalancerLagTimeoutAWS } - e2eservice.TestReachableHTTP(lbNameOrAddress, svcPort, timeout) + e2eservice.TestReachableHTTP(ctx, lbNameOrAddress, svcPort, timeout) - expectedNodes, err := jig.GetEndpointNodeNames() + expectedNodes, err := jig.GetEndpointNodeNames(ctx) framework.ExpectNoError(err) framework.Logf("Starting a goroutine to watch the service's endpoints in the background") @@ -1409,7 +1413,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew // Thus the set of nodes with local endpoints for the service // should remain unchanged. wait.Until(func() { - actualNodes, err := jig.GetEndpointNodeNames() + actualNodes, err := jig.GetEndpointNodeNames(ctx) if err != nil { framework.Logf("The previous set of nodes with local endpoints was %v, now the lookup failed: %v", expectedNodes.List(), err) failed <- struct{}{} @@ -1505,7 +1509,7 @@ func setAffinities(d *appsv1.Deployment, setAffinity bool) { // watchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with // old pods. -func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error { +func watchRecreateDeployment(ctx context.Context, c clientset.Interface, d *appsv1.Deployment) error { if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType { return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type) } @@ -1514,7 +1518,7 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.FieldSelector = fieldSelector - return c.AppsV1().Deployments(d.Namespace).Watch(context.TODO(), options) + return c.AppsV1().Deployments(d.Namespace).Watch(ctx, options) }, } @@ -1540,9 +1544,9 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error d.Generation <= d.Status.ObservedGeneration, nil } - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + ctxUntil, cancel := context.WithTimeout(ctx, 2*time.Minute) defer cancel() - _, err := watchtools.Until(ctx, d.ResourceVersion, w, condition) + _, err := watchtools.Until(ctxUntil, d.ResourceVersion, w, condition) if err == wait.ErrWaitTimeout { err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status) } @@ -1550,12 +1554,12 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error } // waitForDeploymentOldRSsNum waits for the deployment to clean up old rcs. 
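
// Illustrative sketch (not from this patch; hypothetical helper, using the "context", metav1,
// "k8s.io/apimachinery/pkg/watch", "k8s.io/client-go/tools/cache" and clientset imports
// already in this file). cache.ListWatch's WatchFunc has no context parameter, so the
// converted watchRecreateDeployment above lets the closure capture the caller's ctx; every
// (re)watch opened through it is then cancelled together with the test.
func deploymentWatcher(ctx context.Context, c clientset.Interface, ns, fieldSelector string) *cache.ListWatch {
    return &cache.ListWatch{
        WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
            options.FieldSelector = fieldSelector
            return c.AppsV1().Deployments(ns).Watch(ctx, options) // ctx captured from the caller
        },
    }
}
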
-func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error { +func waitForDeploymentOldRSsNum(ctx context.Context, c clientset.Interface, ns, deploymentName string, desiredRSNum int) error { var oldRSs []*appsv1.ReplicaSet var d *appsv1.Deployment pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) { - deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1575,10 +1579,10 @@ func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string } // waitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas. -func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error { +func waitForReplicaSetDesiredReplicas(ctx context.Context, rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error { desiredGeneration := replicaSet.Generation - err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) { - rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) { + rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -1591,10 +1595,10 @@ func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, rep } // waitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum -func waitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error { +func waitForReplicaSetTargetSpecReplicas(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error { desiredGeneration := replicaSet.Generation - err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) { - rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) { + rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -1633,14 +1637,14 @@ func waitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentNa } // Deployment should have a working scale subresource -func testDeploymentSubresources(f *framework.Framework) { +func testDeploymentSubresources(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet deploymentName := "test-new-deployment" framework.Logf("Creating simple deployment %s", deploymentName) d := e2edeployment.NewDeployment("test-new-deployment", int32(1), map[string]string{"name": WebserverImageName}, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) + deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 @@ -1650,11 +1654,11 @@ func testDeploymentSubresources(f 
*framework.Framework) { err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) - _, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + _, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("getting scale subresource") - scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{}) + scale, err := c.AppsV1().Deployments(ns).GetScale(ctx, deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } @@ -1664,14 +1668,14 @@ func testDeploymentSubresources(f *framework.Framework) { ginkgo.By("updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = 2 - scaleResult, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deploymentName, scale, metav1.UpdateOptions{}) + scaleResult, err := c.AppsV1().Deployments(ns).UpdateScale(ctx, deploymentName, scale, metav1.UpdateOptions{}) if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) ginkgo.By("verifying the deployment Spec.Replicas was modified") - deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get deployment resource: %v", err) } @@ -1687,10 +1691,10 @@ func testDeploymentSubresources(f *framework.Framework) { }) framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") - _, err = c.AppsV1().Deployments(ns).Patch(context.TODO(), deploymentName, types.StrategicMergePatchType, []byte(deploymentScalePatchPayload), metav1.PatchOptions{}, "scale") + _, err = c.AppsV1().Deployments(ns).Patch(ctx, deploymentName, types.StrategicMergePatchType, []byte(deploymentScalePatchPayload), metav1.PatchOptions{}, "scale") framework.ExpectNoError(err, "Failed to patch deployment: %v", err) - deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get deployment resource: %v", err) framework.ExpectEqual(*(deployment.Spec.Replicas), int32(4), "deployment should have 4 replicas") } diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index 902c7939a11..c6c684908f7 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -87,16 +87,16 @@ var _ = SIGDescribe("DisruptionController", func() { framework.ConformanceIt("should list and delete a collection of PodDisruptionBudgets", func(ctx context.Context) { specialLabels := map[string]string{"foo_pdb": "bar_pdb"} labelSelector := labels.SelectorFromSet(specialLabels).String() - createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(2), specialLabels) - createPDBMinAvailableOrDie(cs, ns, "foo2", intstr.FromString("1%"), specialLabels) - createPDBMinAvailableOrDie(anotherFramework.ClientSet, anotherFramework.Namespace.Name, "foo3", intstr.FromInt(2), specialLabels) + createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(2), specialLabels) + createPDBMinAvailableOrDie(ctx, cs, ns, "foo2", intstr.FromString("1%"), specialLabels) + createPDBMinAvailableOrDie(ctx, anotherFramework.ClientSet, 
anotherFramework.Namespace.Name, "foo3", intstr.FromInt(2), specialLabels) ginkgo.By("listing a collection of PDBs across all namespaces") - listPDBs(cs, metav1.NamespaceAll, labelSelector, 3, []string{defaultName, "foo2", "foo3"}) + listPDBs(ctx, cs, metav1.NamespaceAll, labelSelector, 3, []string{defaultName, "foo2", "foo3"}) ginkgo.By("listing a collection of PDBs in namespace " + ns) - listPDBs(cs, ns, labelSelector, 2, []string{defaultName, "foo2"}) - deletePDBCollection(cs, ns) + listPDBs(ctx, cs, ns, labelSelector, 2, []string{defaultName, "foo2"}) + deletePDBCollection(ctx, cs, ns) }) }) @@ -107,10 +107,10 @@ var _ = SIGDescribe("DisruptionController", func() { */ framework.ConformanceIt("should create a PodDisruptionBudget", func(ctx context.Context) { ginkgo.By("creating the pdb") - createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromString("1%"), defaultLabels) + createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromString("1%"), defaultLabels) ginkgo.By("updating the pdb") - updatedPDB := updatePDBOrDie(cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { + updatedPDB := updatePDBOrDie(ctx, cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { newMinAvailable := intstr.FromString("2%") pdb.Spec.MinAvailable = &newMinAvailable return pdb @@ -118,7 +118,7 @@ var _ = SIGDescribe("DisruptionController", func() { framework.ExpectEqual(updatedPDB.Spec.MinAvailable.String(), "2%") ginkgo.By("patching the pdb") - patchedPDB := patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { + patchedPDB := patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { newBytes, err := json.Marshal(map[string]interface{}{ "spec": map[string]interface{}{ "minAvailable": "3%", @@ -129,7 +129,7 @@ var _ = SIGDescribe("DisruptionController", func() { }) framework.ExpectEqual(patchedPDB.Spec.MinAvailable.String(), "3%") - deletePDBOrDie(cs, ns, defaultName) + deletePDBOrDie(ctx, cs, ns, defaultName) }) /* @@ -139,15 +139,15 @@ var _ = SIGDescribe("DisruptionController", func() { how many disruptions are allowed. */ framework.ConformanceIt("should observe PodDisruptionBudget status updated", func(ctx context.Context) { - createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels) + createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels) - createPodsOrDie(cs, ns, 3) - waitForPodsOrDie(cs, ns, 3) + createPodsOrDie(ctx, cs, ns, 3) + waitForPodsOrDie(ctx, cs, ns, 3) // Since disruptionAllowed starts out 0, if we see it ever become positive, // that means the controller is working. - err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), defaultName, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, defaultName, metav1.GetOptions{}) if err != nil { return false, err } @@ -162,25 +162,25 @@ var _ = SIGDescribe("DisruptionController", func() { Description: PodDisruptionBudget API must support update and patch operations on status subresource. 
*/ framework.ConformanceIt("should update/patch PodDisruptionBudget status", func(ctx context.Context) { - createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels) + createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels) ginkgo.By("Updating PodDisruptionBudget status") // PDB status can be updated by both PDB controller and the status API. The test selects `DisruptedPods` field to show immediate update via API. // The pod has to exist, otherwise wil be removed by the controller. Other fields may not reflect the change from API. - createPodsOrDie(cs, ns, 1) - waitForPodsOrDie(cs, ns, 1) - pod, _ := locateRunningPod(cs, ns) - updatePDBOrDie(cs, ns, defaultName, func(old *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { + createPodsOrDie(ctx, cs, ns, 1) + waitForPodsOrDie(ctx, cs, ns, 1) + pod, _ := locateRunningPod(ctx, cs, ns) + updatePDBOrDie(ctx, cs, ns, defaultName, func(old *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { old.Status.DisruptedPods = make(map[string]metav1.Time) old.Status.DisruptedPods[pod.Name] = metav1.NewTime(time.Now()) return old }, cs.PolicyV1().PodDisruptionBudgets(ns).UpdateStatus) // fetch again to make sure the update from API was effective - updated := getPDBStatusOrDie(dc, ns, defaultName) + updated := getPDBStatusOrDie(ctx, dc, ns, defaultName) framework.ExpectHaveKey(updated.Status.DisruptedPods, pod.Name, "Expecting the DisruptedPods have %s", pod.Name) ginkgo.By("Patching PodDisruptionBudget status") - patched := patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { + patched := patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { oldBytes, err := json.Marshal(old) framework.ExpectNoError(err, "failed to marshal JSON for old data") old.Status.DisruptedPods = make(map[string]metav1.Time) @@ -193,15 +193,15 @@ var _ = SIGDescribe("DisruptionController", func() { // PDB shouldn't error out when there are unmanaged pods ginkgo.It("should observe that the PodDisruptionBudget status is not updated for unmanaged pods", - func() { - createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels) + func(ctx context.Context) { + createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels) - createPodsOrDie(cs, ns, 3) - waitForPodsOrDie(cs, ns, 3) + createPodsOrDie(ctx, cs, ns, 3) + waitForPodsOrDie(ctx, cs, ns, 3) // Since we allow unmanaged pods to be associated with a PDB, we should not see any error - gomega.Consistently(func() (bool, error) { - pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), defaultName, metav1.GetOptions{}) + gomega.Consistently(ctx, func(ctx context.Context) (bool, error) { + pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, defaultName, metav1.GetOptions{}) if err != nil { return false, err } @@ -291,21 +291,21 @@ var _ = SIGDescribe("DisruptionController", func() { if c.skipForBigClusters { e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1) } - createPodsOrDie(cs, ns, c.podCount) + createPodsOrDie(ctx, cs, ns, c.podCount) if c.replicaSetSize > 0 { - createReplicaSetOrDie(cs, ns, c.replicaSetSize, c.exclusive) + createReplicaSetOrDie(ctx, cs, ns, c.replicaSetSize, c.exclusive) } if c.minAvailable.String() != "" { - createPDBMinAvailableOrDie(cs, ns, defaultName, c.minAvailable, defaultLabels) + createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, c.minAvailable, 
defaultLabels) } if c.maxUnavailable.String() != "" { - createPDBMaxUnavailableOrDie(cs, ns, defaultName, c.maxUnavailable) + createPDBMaxUnavailableOrDie(ctx, cs, ns, defaultName, c.maxUnavailable) } // Locate a running pod. - pod, err := locateRunningPod(cs, ns) + pod, err := locateRunningPod(ctx, cs, ns) framework.ExpectNoError(err) e := &policyv1.Eviction{ @@ -316,19 +316,19 @@ var _ = SIGDescribe("DisruptionController", func() { } if c.shouldDeny { - err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) + err = cs.CoreV1().Pods(ns).EvictV1(ctx, e) framework.ExpectError(err, "pod eviction should fail") framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause") } else { // Only wait for running pods in the "allow" case // because one of shouldDeny cases relies on the // replicaSet not fitting on the cluster. - waitForPodsOrDie(cs, ns, c.podCount+int(c.replicaSetSize)) + waitForPodsOrDie(ctx, cs, ns, c.podCount+int(c.replicaSetSize)) // Since disruptionAllowed starts out false, if an eviction is ever allowed, // that means the controller is working. - err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) + err = wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + err = cs.CoreV1().Pods(ns).EvictV1(ctx, e) if err != nil { return false, nil } @@ -346,13 +346,13 @@ var _ = SIGDescribe("DisruptionController", func() { */ framework.ConformanceIt("should block an eviction until the PDB is updated to allow it", func(ctx context.Context) { ginkgo.By("Creating a pdb that targets all three pods in a test replica set") - createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(3), defaultLabels) - createReplicaSetOrDie(cs, ns, 3, false) + createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(3), defaultLabels) + createReplicaSetOrDie(ctx, cs, ns, 3, false) ginkgo.By("First trying to evict a pod which shouldn't be evictable") - waitForPodsOrDie(cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb + waitForPodsOrDie(ctx, cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb - pod, err := locateRunningPod(cs, ns) + pod, err := locateRunningPod(ctx, cs, ns) framework.ExpectNoError(err) e := &policyv1.Eviction{ ObjectMeta: metav1.ObjectMeta{ @@ -360,25 +360,25 @@ var _ = SIGDescribe("DisruptionController", func() { Namespace: ns, }, } - err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) + err = cs.CoreV1().Pods(ns).EvictV1(ctx, e) framework.ExpectError(err, "pod eviction should fail") framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause") ginkgo.By("Updating the pdb to allow a pod to be evicted") - updatePDBOrDie(cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { + updatePDBOrDie(ctx, cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { newMinAvailable := intstr.FromInt(2) pdb.Spec.MinAvailable = &newMinAvailable return pdb }, cs.PolicyV1().PodDisruptionBudgets(ns).Update) ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable") - waitForPodsOrDie(cs, ns, 3) - waitForPdbToObserveHealthyPods(cs, ns, 3) - err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) + waitForPodsOrDie(ctx, cs, ns, 3) 
+ waitForPdbToObserveHealthyPods(ctx, cs, ns, 3) + err = cs.CoreV1().Pods(ns).EvictV1(ctx, e) framework.ExpectNoError(err) // the eviction is now allowed ginkgo.By("Patching the pdb to disallow a pod to be evicted") - patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { + patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { oldData, err := json.Marshal(old) framework.ExpectNoError(err, "failed to marshal JSON for old data") old.Spec.MinAvailable = nil @@ -389,8 +389,8 @@ var _ = SIGDescribe("DisruptionController", func() { return jsonpatch.CreateMergePatch(oldData, newData) }) - waitForPodsOrDie(cs, ns, 3) - pod, err = locateRunningPod(cs, ns) // locate a new running pod + waitForPodsOrDie(ctx, cs, ns, 3) + pod, err = locateRunningPod(ctx, cs, ns) // locate a new running pod framework.ExpectNoError(err) e = &policyv1.Eviction{ ObjectMeta: metav1.ObjectMeta{ @@ -398,22 +398,22 @@ var _ = SIGDescribe("DisruptionController", func() { Namespace: ns, }, } - err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) + err = cs.CoreV1().Pods(ns).EvictV1(ctx, e) framework.ExpectError(err, "pod eviction should fail") framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause") ginkgo.By("Deleting the pdb to allow a pod to be evicted") - deletePDBOrDie(cs, ns, defaultName) + deletePDBOrDie(ctx, cs, ns, defaultName) ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable") - waitForPodsOrDie(cs, ns, 3) - err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) + waitForPodsOrDie(ctx, cs, ns, 3) + err = cs.CoreV1().Pods(ns).EvictV1(ctx, e) framework.ExpectNoError(err) // the eviction is now allowed }) }) -func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, name string, minAvailable intstr.IntOrString, labels map[string]string) { +func createPDBMinAvailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, minAvailable intstr.IntOrString, labels map[string]string) { pdb := policyv1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -425,12 +425,12 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, name string, MinAvailable: &minAvailable, }, } - _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{}) + _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(ctx, &pdb, metav1.CreateOptions{}) framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns) - waitForPdbToBeProcessed(cs, ns, name) + waitForPdbToBeProcessed(ctx, cs, ns, name) } -func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString) { +func createPDBMaxUnavailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString) { pdb := policyv1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -441,39 +441,39 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, name strin MaxUnavailable: &maxUnavailable, }, } - _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{}) + _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(ctx, &pdb, metav1.CreateOptions{}) framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in 
namespace %s", maxUnavailable.IntVal, ns) - waitForPdbToBeProcessed(cs, ns, name) + waitForPdbToBeProcessed(ctx, cs, ns, name) } type updateFunc func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget type updateRestAPI func(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error) type patchFunc func(pdb *policyv1.PodDisruptionBudget) ([]byte, error) -func updatePDBOrDie(cs kubernetes.Interface, ns string, name string, f updateFunc, api updateRestAPI) (updated *policyv1.PodDisruptionBudget) { +func updatePDBOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, f updateFunc, api updateRestAPI) (updated *policyv1.PodDisruptionBudget) { err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - old, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + old, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return err } old = f(old) - if updated, err = api(context.TODO(), old, metav1.UpdateOptions{}); err != nil { + if updated, err = api(ctx, old, metav1.UpdateOptions{}); err != nil { return err } return nil }) framework.ExpectNoError(err, "Waiting for the PDB update to be processed in namespace %s", ns) - waitForPdbToBeProcessed(cs, ns, name) + waitForPdbToBeProcessed(ctx, cs, ns, name) return updated } -func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, name string, f patchFunc, subresources ...string) (updated *policyv1.PodDisruptionBudget) { +func patchPDBOrDie(ctx context.Context, cs kubernetes.Interface, dc dynamic.Interface, ns string, name string, f patchFunc, subresources ...string) (updated *policyv1.PodDisruptionBudget) { err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - old := getPDBStatusOrDie(dc, ns, name) + old := getPDBStatusOrDie(ctx, dc, ns, name) patchBytes, err := f(old) framework.ExpectNoError(err) - if updated, err = cs.PolicyV1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil { + if updated, err = cs.PolicyV1().PodDisruptionBudgets(ns).Patch(ctx, old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil { return err } framework.ExpectNoError(err) @@ -481,18 +481,18 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam }) framework.ExpectNoError(err, "Waiting for the pdb update to be processed in namespace %s", ns) - waitForPdbToBeProcessed(cs, ns, name) + waitForPdbToBeProcessed(ctx, cs, ns, name) return updated } -func deletePDBOrDie(cs kubernetes.Interface, ns string, name string) { - err := cs.PolicyV1().PodDisruptionBudgets(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) +func deletePDBOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string) { + err := cs.PolicyV1().PodDisruptionBudgets(ns).Delete(ctx, name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting pdb in namespace %s", ns) - waitForPdbToBeDeleted(cs, ns, name) + waitForPdbToBeDeleted(ctx, cs, ns, name) } -func listPDBs(cs kubernetes.Interface, ns string, labelSelector string, count int, expectedPDBNames []string) { - pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) +func listPDBs(ctx context.Context, cs kubernetes.Interface, ns string, labelSelector string, count int, 
expectedPDBNames []string) { + pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "Listing PDB set in namespace %s", ns) framework.ExpectEqual(len(pdbList.Items), count, "Expecting %d PDBs returned in namespace %s", count, ns) @@ -503,18 +503,18 @@ func listPDBs(cs kubernetes.Interface, ns string, labelSelector string, count in framework.ExpectConsistOf(pdbNames, expectedPDBNames, "Expecting returned PDBs '%s' in namespace %s", expectedPDBNames, ns) } -func deletePDBCollection(cs kubernetes.Interface, ns string) { +func deletePDBCollection(ctx context.Context, cs kubernetes.Interface, ns string) { ginkgo.By("deleting a collection of PDBs") - err := cs.PolicyV1().PodDisruptionBudgets(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + err := cs.PolicyV1().PodDisruptionBudgets(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}) framework.ExpectNoError(err, "Deleting PDB set in namespace %s", ns) - waitForPDBCollectionToBeDeleted(cs, ns) + waitForPDBCollectionToBeDeleted(ctx, cs, ns) } -func waitForPDBCollectionToBeDeleted(cs kubernetes.Interface, ns string) { +func waitForPDBCollectionToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string) { ginkgo.By("Waiting for the PDB collection to be deleted") - err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -526,7 +526,7 @@ func waitForPDBCollectionToBeDeleted(cs kubernetes.Interface, ns string) { framework.ExpectNoError(err, "Waiting for the PDB collection to be deleted in namespace %s", ns) } -func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { +func createPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n int) { for i := 0; i < n; i++ { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -545,15 +545,15 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { }, } - _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns) } } -func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { +func waitForPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n int) { ginkgo.By("Waiting for all pods to be running") - err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: "foo=bar"}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: "foo=bar"}) if err != nil { return false, err } @@ -580,7 +580,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns) } -func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) { +func createReplicaSetOrDie(ctx context.Context, cs kubernetes.Interface, ns string, 
size int32, exclusive bool) { container := v1.Container{ Name: "donothing", Image: imageutils.GetPauseImageName(), @@ -612,14 +612,14 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu }, } - _, err := cs.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) + _, err := cs.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{}) framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns) } -func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) { +func locateRunningPod(ctx context.Context, cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) { ginkgo.By("locating a running pod") - err = wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + err = wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -637,10 +637,10 @@ func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err erro return pod, err } -func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string, name string) { +func waitForPdbToBeProcessed(ctx context.Context, cs kubernetes.Interface, ns string, name string) { ginkgo.By("Waiting for the pdb to be processed") - err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -652,10 +652,10 @@ func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string, name string) { framework.ExpectNoError(err, "Waiting for the pdb to be processed in namespace %s", ns) } -func waitForPdbToBeDeleted(cs kubernetes.Interface, ns string, name string) { +func waitForPdbToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string, name string) { ginkgo.By("Waiting for the pdb to be deleted") - err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil // done } @@ -667,10 +667,10 @@ func waitForPdbToBeDeleted(cs kubernetes.Interface, ns string, name string) { framework.ExpectNoError(err, "Waiting for the pdb to be deleted in namespace %s", ns) } -func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyCount int32) { +func waitForPdbToObserveHealthyPods(ctx context.Context, cs kubernetes.Interface, ns string, healthyCount int32) { ginkgo.By("Waiting for the pdb to observed all healthy pods") - err := wait.PollImmediate(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) { - pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, wait.ForeverTestTimeout, func(ctx context.Context) (bool, error) { + pdb, err := 
cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, "foo", metav1.GetOptions{}) if err != nil { return false, err } @@ -682,9 +682,9 @@ func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyC framework.ExpectNoError(err, "Waiting for the pdb in namespace %s to observed %d healthy pods", ns, healthyCount) } -func getPDBStatusOrDie(dc dynamic.Interface, ns string, name string) *policyv1.PodDisruptionBudget { +func getPDBStatusOrDie(ctx context.Context, dc dynamic.Interface, ns string, name string) *policyv1.PodDisruptionBudget { pdbStatusResource := policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets") - unstruct, err := dc.Resource(pdbStatusResource).Namespace(ns).Get(context.TODO(), name, metav1.GetOptions{}, "status") + unstruct, err := dc.Resource(pdbStatusResource).Namespace(ns).Get(ctx, name, metav1.GetOptions{}, "status") framework.ExpectNoError(err) pdb, err := unstructuredToPDB(unstruct) framework.ExpectNoError(err, "Getting the status of the pdb %s in namespace %s", name, ns) diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 2944f45be62..491fd932efd 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -82,15 +82,15 @@ var _ = SIGDescribe("Job", func() { ginkgo.It("should run a job to completion when tasks succeed", func(ctx context.Context) { ginkgo.By("Creating a job") job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job reaches completions") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring pods for job exist") - pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) + pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name) successes := int32(0) for _, pod := range pods.Items { @@ -110,7 +110,7 @@ var _ = SIGDescribe("Job", func() { // the Job's Pods to be scheduled to a single Node and use a hostPath // volume to persist data across new Pods. 
ginkgo.By("Looking for a node to schedule job pod") - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) ginkgo.By("Creating a job") @@ -126,7 +126,7 @@ var _ = SIGDescribe("Job", func() { }, }, } - job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job fails") @@ -146,7 +146,7 @@ var _ = SIGDescribe("Job", func() { backoffLimit := int32(0) ginkgo.By("Looking for a node to schedule job pod") - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) ginkgo.By("Creating a job") @@ -162,11 +162,11 @@ var _ = SIGDescribe("Job", func() { }, }, } - job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job reaches completions") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) @@ -186,7 +186,7 @@ var _ = SIGDescribe("Job", func() { // 5. Evict the 0-indexed pod // 6. Await for the job to successfully complete ginkgo.DescribeTable("Using a pod failure policy to not count some failures towards the backoffLimit", - func(policy *batchv1.PodFailurePolicy) { + func(ctx context.Context, policy *batchv1.PodFailurePolicy) { mode := batchv1.IndexedCompletion // We set the backoffLimit to 0 so that any pod failure would trigger @@ -195,25 +195,25 @@ var _ = SIGDescribe("Job", func() { backoffLimit := int32(0) ginkgo.By("Looking for a node to schedule job pods") - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) ginkgo.By("Creating a job") job := e2ejob.NewTestJobOnNode("notTerminateOnce", "pod-disruption-failure-ignore", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name) job.Spec.CompletionMode = &mode job.Spec.PodFailurePolicy = policy - job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Awaiting for all non 0-indexed pods to succeed to ensure the marker file is created") - err = e2ejob.WaitForJobPodsSucceeded(f.ClientSet, f.Namespace.Name, job.Name, completions-1) + err = e2ejob.WaitForJobPodsSucceeded(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions-1) framework.ExpectNoError(err, "failed to await for all non 0-indexed pods to succeed for job: %s/%s", job.Name, job.Namespace) ginkgo.By("Awaiting for the 0-indexed pod to be running") - err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1) + err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, 1) framework.ExpectNoError(err, "failed to await for the 0-indexed pod to be running for the job: %s/%s", job.Name, job.Namespace) - pods, err := 
e2ejob.GetAllRunningJobPods(f.ClientSet, f.Namespace.Name, job.Name) + pods, err := e2ejob.GetAllRunningJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to get running pods for the job: %s/%s", job.Name, job.Namespace) framework.ExpectEqual(len(pods), 1, "Exactly one running pod is expected") pod := pods[0] @@ -228,11 +228,11 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to evict the pod: %s/%s", pod.Name, pod.Namespace) ginkgo.By(fmt.Sprintf("Awaiting for the pod: %s/%s to be deleted", pod.Name, pod.Namespace)) - err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodDelete) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodDelete) framework.ExpectNoError(err, "failed to await for the pod to be deleted: %s/%s", pod.Name, pod.Namespace) ginkgo.By("Ensuring job reaches completions") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) }, ginkgo.Entry("Ignore DisruptionTarget condition", &batchv1.PodFailurePolicy{ @@ -276,12 +276,12 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Creating a job with suspend=true") job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job.Spec.Suspend = pointer.BoolPtr(true) - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring pods aren't created for job") framework.ExpectEqual(wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) { - pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) + pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) if err != nil { return false, err } @@ -289,7 +289,7 @@ var _ = SIGDescribe("Job", func() { }), wait.ErrWaitTimeout) ginkgo.By("Checking Job status to observe Suspended state") - job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) + job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to retrieve latest job object") exists := false for _, c := range job.Status.Conditions { @@ -302,11 +302,11 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Updating the job with suspend=false") job.Spec.Suspend = pointer.BoolPtr(false) - job, err = e2ejob.UpdateJob(f.ClientSet, f.Namespace.Name, job) + job, err = e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name) ginkgo.By("Waiting for job to complete") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) @@ -314,21 +314,21 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Creating a job with suspend=false") job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job.Spec.Suspend = pointer.Bool(false) - job, err := 
e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensure pods equal to parallelism count is attached to the job") - err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) + err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism) framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) ginkgo.By("Updating the job with suspend=true") err = wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { - job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) + job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) if err != nil { return false, err } job.Spec.Suspend = pointer.Bool(true) - updatedJob, err := e2ejob.UpdateJob(f.ClientSet, f.Namespace.Name, job) + updatedJob, err := e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job) if err == nil { job = updatedJob return true, nil @@ -341,11 +341,11 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring pods are deleted") - err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name) + err = e2ejob.WaitForAllJobPodsGone(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to ensure pods are deleted after suspend=true") ginkgo.By("Checking Job status to observe Suspended state") - job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) + job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to retrieve latest job object") exists := false for _, c := range job.Status.Conditions { @@ -368,15 +368,15 @@ var _ = SIGDescribe("Job", func() { job := e2ejob.NewTestJob("succeed", "indexed-job", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) mode := batchv1.IndexedCompletion job.Spec.CompletionMode = &mode - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create indexed job in namespace %s", f.Namespace.Name) ginkgo.By("Ensuring job reaches completions") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring pods with index for job exist") - pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) + pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name) succeededIndexes := sets.NewInt() for _, pod := range pods.Items { @@ -401,19 +401,19 @@ var _ = SIGDescribe("Job", func() { ginkgo.It("should remove pods when job is deleted", func(ctx context.Context) { ginkgo.By("Creating a job") job := e2ejob.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, 
f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensure pods equal to parallelism count is attached to the job") - err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) + err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism) framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) ginkgo.By("Delete the job") - err = e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name) + err = e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to delete the job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensure the pods associated with the job are also deleted") - err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name) + err = e2ejob.WaitForAllJobPodsGone(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) }) @@ -431,11 +431,11 @@ var _ = SIGDescribe("Job", func() { // up to 5 minutes between restarts, making test timeout due to // successive failures too likely with a reasonable test timeout. job := e2ejob.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit) - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job reaches completions") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) @@ -449,16 +449,16 @@ var _ = SIGDescribe("Job", func() { // Instead, we force the Job's Pods to be scheduled to a single Node // and use a hostPath volume to persist data across new Pods. 
ginkgo.By("Looking for a node to schedule job pod") - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) ginkgo.By("Creating a job") job := e2ejob.NewTestJobOnNode("failOnce", "fail-once-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name) - job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job reaches completions") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) @@ -466,10 +466,10 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Creating a job") var activeDeadlineSeconds int64 = 1 job := e2ejob.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit) - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job past active deadline") - err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+15)*time.Second, "DeadlineExceeded") + err = waitForJobFailure(ctx, f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+15)*time.Second, "DeadlineExceeded") framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name) }) @@ -481,18 +481,18 @@ var _ = SIGDescribe("Job", func() { framework.ConformanceIt("should delete a job", func(ctx context.Context) { ginkgo.By("Creating a job") job := e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring active pods == parallelism") - err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) + err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism) framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name) ginkgo.By("delete a job") - framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) + framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) ginkgo.By("Ensuring job was deleted") - _, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) + _, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name) framework.ExpectEqual(apierrors.IsNotFound(err), true) }) @@ -510,25 +510,25 @@ var _ = SIGDescribe("Job", func() { // Replace job with the one returned from Create() so it has the UID. 
// Save Kind since it won't be populated in the returned job. kind := job.Kind - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) job.Kind = kind ginkgo.By("Ensuring active pods == parallelism") - err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) + err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism) framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name) ginkgo.By("Orphaning one of the Job's Pods") - pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) + pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism))) pod := pods.Items[0] - e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) { pod.OwnerReferences = nil }) ginkgo.By("Checking that the Job readopts the Pod") - gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", e2ejob.JobTimeout, + gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "adopted", e2ejob.JobTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { @@ -542,12 +542,12 @@ var _ = SIGDescribe("Job", func() { )).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name) ginkgo.By("Removing the labels from the Job's Pod") - e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) { pod.Labels = nil }) ginkgo.By("Checking that the Job releases the Pod") - gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", e2ejob.JobTimeout, + gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "released", e2ejob.JobTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef != nil { @@ -562,15 +562,15 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Creating a job") backoff := 1 job := e2ejob.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff)) - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job exceed backofflimit") - err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, e2ejob.JobTimeout, "BackoffLimitExceeded") + err = waitForJobFailure(ctx, f.ClientSet, f.Namespace.Name, job.Name, e2ejob.JobTimeout, "BackoffLimitExceeded") framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name) ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1)) - pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) + pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1)) for _, pod := range 
pods.Items { @@ -581,8 +581,8 @@ var _ = SIGDescribe("Job", func() { ginkgo.It("should run a job to completion with CPU requests [Serial]", func(ctx context.Context) { ginkgo.By("Creating a job that with CPU requests") - testNodeName := scheduling.GetNodeThatCanRunPod(f) - targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), testNodeName, metav1.GetOptions{}) + testNodeName := scheduling.GetNodeThatCanRunPod(ctx, f) + targetNode, err := f.ClientSet.CoreV1().Nodes().Get(ctx, testNodeName, metav1.GetOptions{}) framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName) cpu, ok := targetNode.Status.Allocatable[v1.ResourceCPU] @@ -605,15 +605,15 @@ var _ = SIGDescribe("Job", func() { } framework.Logf("Creating job %q with a node hostname selector %q with cpu request %q", job.Name, testNodeName, cpuRequest) - job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job reaches completions") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, largeCompletions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, largeCompletions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring pods for job exist") - pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) + pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name) successes := int32(0) for _, pod := range pods.Items { @@ -640,11 +640,11 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Creating a job") job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) - job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensure pods equal to parallelism count is attached to the job") - err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) + err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism) framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) // /status subresource operations @@ -657,7 +657,7 @@ var _ = SIGDescribe("Job", func() { jStatusJSON, err := json.Marshal(jStatus) framework.ExpectNoError(err) - patchedStatus, err := jClient.Patch(context.TODO(), job.Name, types.MergePatchType, + patchedStatus, err := jClient.Patch(ctx, job.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(jStatusJSON)+`}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) @@ -669,12 +669,12 @@ var _ = SIGDescribe("Job", func() { now2 := metav1.Now().Rfc3339Copy() var statusToUpdate, updatedStatus *batchv1.Job err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - statusToUpdate, err = jClient.Get(context.TODO(), job.Name, metav1.GetOptions{}) + statusToUpdate, err = jClient.Get(ctx, job.Name, metav1.GetOptions{}) if err != nil { return err } statusToUpdate.Status.StartTime = &now2 - 
updatedStatus, err = jClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err = jClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err) @@ -682,7 +682,7 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("get /status") jResource := schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"} - gottenStatus, err := f.DynamicClient.Resource(jResource).Namespace(ns).Get(context.TODO(), job.Name, metav1.GetOptions{}, "status") + gottenStatus, err := f.DynamicClient.Resource(jResource).Namespace(ns).Get(ctx, job.Name, metav1.GetOptions{}, "status") framework.ExpectNoError(err) statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid") framework.ExpectNoError(err) @@ -711,22 +711,22 @@ var _ = SIGDescribe("Job", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector - return jobClient.Watch(context.TODO(), options) + return jobClient.Watch(ctx, options) }, } - jobsList, err := jobClient.List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + jobsList, err := jobClient.List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Job") ginkgo.By("Creating a suspended job") job := e2ejob.NewTestJob("succeed", jobName, v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job.Labels = label job.Spec.Suspend = pointer.BoolPtr(true) - job, err = e2ejob.CreateJob(f.ClientSet, ns, job) + job, err = e2ejob.CreateJob(ctx, f.ClientSet, ns, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", ns) ginkgo.By("Patching the Job") payload := "{\"metadata\":{\"labels\":{\"" + jobName + "\":\"patched\"}}}" - patchedJob, err := f.ClientSet.BatchV1().Jobs(ns).Patch(context.TODO(), jobName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) + patchedJob, err := f.ClientSet.BatchV1().Jobs(ns).Patch(ctx, jobName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch Job %s in namespace %s", jobName, ns) ginkgo.By("Watching for Job to be patched") @@ -741,21 +741,21 @@ var _ = SIGDescribe("Job", func() { updatedKey: jobName, updatedValue: "patched", } - waitForJobEvent(c) + waitForJobEvent(ctx, c) framework.ExpectEqual(patchedJob.Labels[jobName], "patched", "Did not find job label for this job. 
Current labels: %v", patchedJob.Labels) ginkgo.By("Updating the job") var updatedJob *batchv1.Job err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - patchedJob, err = jobClient.Get(context.TODO(), jobName, metav1.GetOptions{}) + patchedJob, err = jobClient.Get(ctx, jobName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get job %s", jobName) patchedJob.Spec.Suspend = pointer.BoolPtr(false) if patchedJob.Annotations == nil { patchedJob.Annotations = map[string]string{} } patchedJob.Annotations["updated"] = "true" - updatedJob, err = e2ejob.UpdateJob(f.ClientSet, ns, patchedJob) + updatedJob, err = e2ejob.UpdateJob(ctx, f.ClientSet, ns, patchedJob) return err }) framework.ExpectNoError(err, "failed to update job in namespace: %s", ns) @@ -772,24 +772,24 @@ var _ = SIGDescribe("Job", func() { updatedKey: "updated", updatedValue: "true", } - waitForJobEvent(c) + waitForJobEvent(ctx, c) framework.ExpectEqual(updatedJob.Annotations["updated"], "true", "updated Job should have the applied annotation") framework.Logf("Found Job annotations: %#v", patchedJob.Annotations) ginkgo.By("Listing all Jobs with LabelSelector") - jobs, err := f.ClientSet.BatchV1().Jobs("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + jobs, err := f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "Failed to list job. %v", err) framework.ExpectEqual(len(jobs.Items), 1, "Failed to find job %v", jobName) testJob := jobs.Items[0] framework.Logf("Job: %v as labels: %v", testJob.Name, testJob.Labels) ginkgo.By("Waiting for job to complete") - err = e2ejob.WaitForJobComplete(f.ClientSet, ns, jobName, completions) + err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, ns, jobName, completions) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", ns) ginkgo.By("Delete a job collection with a labelselector") propagationPolicy := metav1.DeletePropagationBackground - err = f.ClientSet.BatchV1().Jobs(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}, metav1.ListOptions{LabelSelector: labelSelector}) + err = f.ClientSet.BatchV1().Jobs(ns).DeleteCollection(ctx, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to delete job %s in namespace: %s", job.Name, ns) ginkgo.By("Watching for Job to be deleted") @@ -804,10 +804,10 @@ var _ = SIGDescribe("Job", func() { updatedKey: "e2e-job-label", updatedValue: jobName, } - waitForJobEvent(c) + waitForJobEvent(ctx, c) ginkgo.By("Relist jobs to confirm deletion") - jobs, err = f.ClientSet.BatchV1().Jobs("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + jobs, err = f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "Failed to list job. %v", err) framework.ExpectEqual(len(jobs.Items), 0, "Found job %v", jobName) }) @@ -817,9 +817,9 @@ var _ = SIGDescribe("Job", func() { // waitForJobEvent is used to track and log Job events. // As delivery of events is not actually guaranteed we // will not return an error if we miss the required event. 
-func waitForJobEvent(config watchEventConfig) { +func waitForJobEvent(ctx context.Context, config watchEventConfig) { f := config.framework - ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStartShort) + ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort) defer cancel() _, err := watchtools.Until(ctx, config.resourceVersion, config.w, func(event watch.Event) (bool, error) { if job, ok := event.Object.(*batchv1.Job); ok { @@ -847,15 +847,15 @@ func waitForJobEvent(config watchEventConfig) { return false, nil }) if err != nil { - j, _ := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).Get(context.TODO(), config.jobName, metav1.GetOptions{}) + j, _ := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).Get(ctx, config.jobName, metav1.GetOptions{}) framework.Logf("We missed the %v event. Job details: %+v", config.watchEvent, j) } } // waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail. -func waitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error { +func waitForJobFailure(ctx context.Context, c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error { return wait.Poll(framework.Poll, timeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) + curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index b56d63a69a2..e7f38122985 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -65,14 +65,14 @@ var _ = SIGDescribe("ReplicationController", func() { Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP. */ framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) { - TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage) + TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage) }) ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) { // requires private images e2eskipper.SkipUnlessProviderIs("gce", "gke") privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate) - TestReplicationControllerServeImageOrFail(f, "private", privateimage.GetE2EImage()) + TestReplicationControllerServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage()) }) /* @@ -81,7 +81,7 @@ var _ = SIGDescribe("ReplicationController", func() { Description: Attempt to create a Replication Controller with pods exceeding the namespace quota. The creation MUST fail */ framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) { - testReplicationControllerConditionCheck(f) + testReplicationControllerConditionCheck(ctx, f) }) /* @@ -90,7 +90,7 @@ var _ = SIGDescribe("ReplicationController", func() { Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. 
The RC MUST either adopt the Pod or delete and replace it with a new Pod */ framework.ConformanceIt("should adopt matching pods on creation", func(ctx context.Context) { - testRCAdoptMatchingOrphans(f) + testRCAdoptMatchingOrphans(ctx, f) }) /* @@ -99,7 +99,7 @@ var _ = SIGDescribe("ReplicationController", func() { Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references. */ framework.ConformanceIt("should release no longer matching pods", func(ctx context.Context) { - testRCReleaseControlledNotMatching(f) + testRCReleaseControlledNotMatching(ctx, f) }) /* @@ -145,17 +145,17 @@ var _ = SIGDescribe("ReplicationController", func() { }, } - framework.WatchEventSequenceVerifier(context.TODO(), dc, rcResource, testRcNamespace, testRcName, metav1.ListOptions{LabelSelector: "test-rc-static=true"}, expectedWatchEvents, func(retryWatcher *watchtools.RetryWatcher) (actualWatchEvents []watch.Event) { + framework.WatchEventSequenceVerifier(ctx, dc, rcResource, testRcNamespace, testRcName, metav1.ListOptions{LabelSelector: "test-rc-static=true"}, expectedWatchEvents, func(retryWatcher *watchtools.RetryWatcher) (actualWatchEvents []watch.Event) { ginkgo.By("creating a ReplicationController") // Create a ReplicationController - _, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(context.TODO(), &rcTest, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(ctx, &rcTest, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create ReplicationController") ginkgo.By("waiting for RC to be added") eventFound := false - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + ctxUntil, cancel := context.WithTimeout(ctx, 60*time.Second) defer cancel() - _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) { if watchEvent.Type != watch.Added { return false, nil } @@ -168,9 +168,9 @@ var _ = SIGDescribe("ReplicationController", func() { ginkgo.By("waiting for available Replicas") eventFound = false - ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) + ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() - _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) { var rc *v1.ReplicationController rcBytes, err := json.Marshal(watchEvent.Object) if err != nil { @@ -197,14 +197,14 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectNoError(err, "failed to marshal json of replicationcontroller label patch") // Patch the ReplicationController ginkgo.By("patching ReplicationController") - testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{}) + testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{}) framework.ExpectNoError(err, "Failed to patch ReplicationController") 
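Note on the ctxUntil rename above: reassigning ctx to the result of context.WithTimeout inside the callback would shadow the spec context, so every later API call would inherit the short watch deadline. Deriving a separately named context keeps both behaviours, roughly as follows (mirroring the hunks above, with a simplified condition):

	// Bound only the watch, not the rest of the spec. Cancelling the spec's ctx
	// still cancels ctxUntil, because it is derived from ctx.
	ctxUntil, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
		return watchEvent.Type == watch.Added, nil // simplified condition
	})
	framework.ExpectNoError(err, "failed waiting for ReplicationController watch event")
	// Subsequent calls keep using ctx, which carries the full spec lifetime.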
framework.ExpectEqual(testRcPatched.ObjectMeta.Labels["test-rc"], "patched", "failed to patch RC") ginkgo.By("waiting for RC to be modified") eventFound = false - ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second) defer cancel() - _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) { if watchEvent.Type != watch.Modified { return false, nil } @@ -225,14 +225,14 @@ var _ = SIGDescribe("ReplicationController", func() { // Patch the ReplicationController's status ginkgo.By("patching ReplicationController status") - rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status") + rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus") framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0") ginkgo.By("waiting for RC to be modified") eventFound = false - ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second) defer cancel() - _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) { if watchEvent.Type != watch.Modified { return false, nil } @@ -244,7 +244,7 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added) ginkgo.By("waiting for available Replicas") - _, err = watchUntilWithoutRetry(context.TODO(), retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { var rc *v1.ReplicationController rcBytes, err := json.Marshal(watchEvent.Object) if err != nil { @@ -263,7 +263,7 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count") ginkgo.By("fetching ReplicationController status") - rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}, "status") + rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "Failed to fetch ReplicationControllerStatus") rcStatusUjson, err := json.Marshal(rcStatusUnstructured) @@ -280,13 +280,13 @@ var _ = SIGDescribe("ReplicationController", func() { // Patch the ReplicationController's scale ginkgo.By("patching ReplicationController scale") - _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale") + _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale") framework.ExpectNoError(err, "Failed to patch 
ReplicationControllerScale") ginkgo.By("waiting for RC to be modified") eventFound = false - ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) + ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() - _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) { if watchEvent.Type != watch.Modified { return false, nil } @@ -299,7 +299,7 @@ var _ = SIGDescribe("ReplicationController", func() { ginkgo.By("waiting for ReplicationController's scale to be the max amount") eventFound = false - _, err = watchUntilWithoutRetry(context.TODO(), retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { var rc *v1.ReplicationController rcBytes, err := json.Marshal(watchEvent.Object) if err != nil { @@ -320,7 +320,7 @@ var _ = SIGDescribe("ReplicationController", func() { // Get the ReplicationController ginkgo.By("fetching ReplicationController; ensuring that it's patched") - rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch ReplicationController") framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch") @@ -330,14 +330,14 @@ var _ = SIGDescribe("ReplicationController", func() { // Replace the ReplicationController's status ginkgo.By("updating ReplicationController status") - _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(context.TODO(), rcStatusUpdatePayload, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(ctx, rcStatusUpdatePayload, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update ReplicationControllerStatus") ginkgo.By("waiting for RC to be modified") eventFound = false - ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second) defer cancel() - _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) { if watchEvent.Type != watch.Modified { return false, nil } @@ -349,7 +349,7 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added) ginkgo.By("listing all ReplicationControllers") - rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true"}) + rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(ctx, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) framework.ExpectNoError(err, "failed to list ReplicationController") framework.ExpectEqual(len(rcs.Items) > 0, true) @@ -367,14 +367,14 @@ var _ = SIGDescribe("ReplicationController", func() { // Delete ReplicationController ginkgo.By("deleting ReplicationControllers by collection") - err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: 
"test-rc-static=true"}) + err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) framework.ExpectNoError(err, "Failed to delete ReplicationControllers") ginkgo.By("waiting for ReplicationController to have a DELETED watchEvent") eventFound = false - ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second) defer cancel() - _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { + _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) { if watchEvent.Type != watch.Deleted { return false, nil } @@ -387,7 +387,7 @@ var _ = SIGDescribe("ReplicationController", func() { return actualWatchEvents }, func() (err error) { - _ = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) + _ = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) return err }) }) @@ -407,25 +407,25 @@ var _ = SIGDescribe("ReplicationController", func() { ginkgo.By(fmt.Sprintf("Creating ReplicationController %q", rcName)) rc := newRC(rcName, initialRCReplicaCount, map[string]string{"name": rcName}, WebserverImageName, WebserverImage, nil) - _, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) + _, err := rcClient.Create(ctx, rc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create ReplicationController: %v", err) - err = wait.PollImmediate(1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount)) + err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount)) framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas") ginkgo.By(fmt.Sprintf("Getting scale subresource for ReplicationController %q", rcName)) - scale, err := rcClient.GetScale(context.TODO(), rcName, metav1.GetOptions{}) + scale, err := rcClient.GetScale(ctx, rcName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get scale subresource: %v", err) framework.ExpectEqual(scale.Status.Replicas, initialRCReplicaCount, "Failed to get the current replica count") ginkgo.By("Updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = expectedRCReplicaCount - _, err = rcClient.UpdateScale(context.TODO(), rcName, scale, metav1.UpdateOptions{}) + _, err = rcClient.UpdateScale(ctx, rcName, scale, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update scale subresource: %v", err) ginkgo.By(fmt.Sprintf("Verifying replicas where modified for replication controller %q", rcName)) - err = wait.PollImmediate(1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount)) + err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount)) framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas") }) }) @@ -460,7 +460,7 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa // 
TestReplicationControllerServeImageOrFail is a basic test to check // the deployment of an image using a replication controller. // The image serves its hostname which is checked for each replica. -func TestReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) { +func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) { name := "my-hostname-" + test + "-" + string(uuid.NewUUID()) replicas := int32(1) @@ -471,12 +471,12 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri ginkgo.By(fmt.Sprintf("Creating replication controller %s", name)) newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} - _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), newRC, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, newRC, metav1.CreateOptions{}) framework.ExpectNoError(err) // Check that pods for the new RC were created. // TODO: Maybe switch PodsCreated to just check owner references. - pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) + pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas) framework.ExpectNoError(err) // Wait for the pods to enter the running state. Waiting loops until the pods @@ -487,9 +487,9 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri if pod.DeletionTimestamp != nil { continue } - err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) if err != nil { - updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) if getErr == nil { err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) } else { @@ -509,7 +509,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) + err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } @@ -519,18 +519,18 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri // 2. Create a replication controller that wants to run 3 pods. // 3. Check replication controller conditions for a ReplicaFailure condition. // 4. Relax quota or scale down the controller and observe the condition is gone. 
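The wait.Poll to wait.PollWithContext switch above is what makes the response-checking loop abortable. A standalone sketch of the difference, using only k8s.io/apimachinery/pkg/util/wait; the condition below is illustrative and never succeeds on purpose:

	package main

	import (
		"context"
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		// Stand-in for a spec that gets aborted shortly after it starts polling.
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			time.Sleep(2 * time.Second)
			cancel()
		}()

		// PollWithContext returns promptly once ctx is cancelled; plain wait.Poll
		// would keep polling until the full timeout expired.
		err := wait.PollWithContext(ctx, 500*time.Millisecond, 5*time.Minute,
			func(ctx context.Context) (bool, error) {
				fmt.Println("polling...")
				return false, nil
			})
		fmt.Println("poll stopped:", err)
	}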
-func testReplicationControllerConditionCheck(f *framework.Framework) { +func testReplicationControllerConditionCheck(ctx context.Context, f *framework.Framework) { c := f.ClientSet namespace := f.Namespace.Name name := "condition-test" framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name) quota := newPodQuota(name, "2") - _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota, metav1.CreateOptions{}) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(ctx, quota, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + quota, err = c.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -545,14 +545,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name)) rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) - rc, err = c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err = c.CoreV1().ReplicationControllers(namespace).Create(ctx, rc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name)) generation := rc.Generation conditions := rc.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -571,7 +571,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name)) - rc, err = updateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) { + rc, err = updateReplicationControllerWithRetries(ctx, c, namespace, name, func(update *v1.ReplicationController) { x := int32(2) update.Spec.Replicas = &x }) @@ -581,7 +581,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { generation = rc.Generation conditions = rc.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -600,10 +600,10 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { framework.ExpectNoError(err) } -func testRCAdoptMatchingOrphans(f *framework.Framework) { +func testRCAdoptMatchingOrphans(ctx context.Context, f *framework.Framework) { name := "pod-adoption" ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) - p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{ + p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ @@ -624,12 +624,12 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rcSt.Spec.Selector = 
map[string]string{"name": name} - rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt, metav1.CreateOptions{}) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Then the orphan pod is adopted") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the RC if apierrors.IsNotFound(err) { return true, nil @@ -647,26 +647,26 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { framework.ExpectNoError(err) } -func testRCReleaseControlledNotMatching(f *framework.Framework) { +func testRCReleaseControlledNotMatching(ctx context.Context, f *framework.Framework) { name := "pod-release" ginkgo.By("Given a ReplicationController is created") replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rcSt.Spec.Selector = map[string]string{"name": name} - rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt, metav1.CreateOptions{}) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("When the matched label of one of its pods change") - pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas) + pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rc.Name, replicas) framework.ExpectNoError(err) p := pods.Items[0] err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pod.Labels = map[string]string{"name": "not-matching-name"} - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, pod, metav1.UpdateOptions{}) if err != nil && apierrors.IsConflict(err) { return false, nil } @@ -679,7 +679,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { ginkgo.By("Then the pod is released") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for _, owner := range p2.OwnerReferences { if *owner.Controller && owner.UID == rc.UID { @@ -699,17 +699,17 @@ type updateRcFunc func(d *v1.ReplicationController) // 1. Get latest resource // 2. applyUpdate // 3. 
Update the resource -func updateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) { +func updateReplicationControllerWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) { var rc *v1.ReplicationController var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { var err error - if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { + if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rc) - if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}); err == nil { + if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(ctx, rc, metav1.UpdateOptions{}); err == nil { framework.Logf("Updating replication controller %q", name) return true, nil } @@ -769,11 +769,11 @@ func watchUntilWithoutRetry(ctx context.Context, watcher watch.Interface, condit return lastEvent, nil } -func checkReplicationControllerStatusReplicaCount(f *framework.Framework, rcName string, quantity int32) func() (bool, error) { - return func() (bool, error) { +func checkReplicationControllerStatusReplicaCount(f *framework.Framework, rcName string, quantity int32) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { framework.Logf("Get Replication Controller %q to confirm replicas", rcName) - rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Get(context.TODO(), rcName, metav1.GetOptions{}) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Get(ctx, rcName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 1f59d0e0c38..3d192ac8ab0 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -109,18 +109,18 @@ var _ = SIGDescribe("ReplicaSet", func() { Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried. */ framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) { - testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage) + testReplicaSetServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage) }) ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) { // requires private images e2eskipper.SkipUnlessProviderIs("gce", "gke") privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate) - testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage()) + testReplicaSetServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage()) }) ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) { - testReplicaSetConditionCheck(f) + testReplicaSetConditionCheck(ctx, f) }) /* @@ -129,7 +129,7 @@ var _ = SIGDescribe("ReplicaSet", func() { Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. 
When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references */ framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func(ctx context.Context) { - testRSAdoptMatchingAndReleaseNotMatching(f) + testRSAdoptMatchingAndReleaseNotMatching(ctx, f) }) /* @@ -141,7 +141,7 @@ var _ = SIGDescribe("ReplicaSet", func() { a scale subresource. */ framework.ConformanceIt("Replicaset should have a working scale subresource", func(ctx context.Context) { - testRSScaleSubresources(f) + testRSScaleSubresources(ctx, f) }) /* @@ -152,7 +152,7 @@ var _ = SIGDescribe("ReplicaSet", func() { The RS MUST be patched and verify that patch succeeded. */ framework.ConformanceIt("Replace and Patch tests", func(ctx context.Context) { - testRSLifeCycle(f) + testRSLifeCycle(ctx, f) }) /* @@ -163,7 +163,7 @@ var _ = SIGDescribe("ReplicaSet", func() { MUST succeed when deleting the ReplicaSet via deleteCollection. */ framework.ConformanceIt("should list and delete a collection of ReplicaSets", func(ctx context.Context) { - listRSDeleteCollection(f) + listRSDeleteCollection(ctx, f) }) @@ -174,13 +174,13 @@ var _ = SIGDescribe("ReplicaSet", func() { mutating sub-resource operations MUST be visible to subsequent reads. */ framework.ConformanceIt("should validate Replicaset Status endpoints", func(ctx context.Context) { - testRSStatus(f) + testRSStatus(ctx, f) }) }) // A basic test to check the deployment of an image using a ReplicaSet. The // image serves its hostname which is checked for each replica. -func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) { +func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) { name := "my-hostname-" + test + "-" + string(uuid.NewUUID()) replicas := int32(1) @@ -190,12 +190,12 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s framework.Logf("Creating ReplicaSet %s", name) newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} - _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), newRS, metav1.CreateOptions{}) + _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, newRS, metav1.CreateOptions{}) framework.ExpectNoError(err) // Check that pods for the new RS were created. // TODO: Maybe switch PodsCreated to just check owner references. - pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) + pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas) framework.ExpectNoError(err) // Wait for the pods to enter the running state. 
Waiting loops until the pods @@ -206,9 +206,9 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s if pod.DeletionTimestamp != nil { continue } - err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) if err != nil { - updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) if getErr == nil { err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) } else { @@ -228,7 +228,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) + err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } @@ -238,18 +238,18 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s // 2. Create a replica set that wants to run 3 pods. // 3. Check replica set conditions for a ReplicaFailure condition. // 4. Scale down the replica set and observe the condition is gone. -func testReplicaSetConditionCheck(f *framework.Framework) { +func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) { c := f.ClientSet namespace := f.Namespace.Name name := "condition-test" ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name)) quota := newPodQuota(name, "2") - _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota, metav1.CreateOptions{}) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(ctx, quota, metav1.CreateOptions{}) framework.ExpectNoError(err) err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + quota, err = c.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -264,14 +264,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name)) rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) - rs, err = c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs, metav1.CreateOptions{}) + rs, err = c.AppsV1().ReplicaSets(namespace).Create(ctx, rs, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name)) generation := rs.Generation conditions := rs.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + rs, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != 
nil { return false, err } @@ -301,7 +301,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) { generation = rs.Generation conditions = rs.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + rs, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -320,10 +320,10 @@ func testReplicaSetConditionCheck(f *framework.Framework) { framework.ExpectNoError(err) } -func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { +func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.Framework) { name := "pod-adoption-release" ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) - p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{ + p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ @@ -344,12 +344,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { replicas := int32(1) rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}} - rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), rsSt, metav1.CreateOptions{}) + rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, rsSt, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Then the orphan pod is adopted") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the ReplicaSet if apierrors.IsNotFound(err) { return true, nil @@ -367,16 +367,16 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { framework.ExpectNoError(err) ginkgo.By("When the matched label of one of its pods change") - pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas) + pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rs.Name, replicas) framework.ExpectNoError(err) p = &pods.Items[0] err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pod.Labels = map[string]string{"name": "not-matching-name"} - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, pod, metav1.UpdateOptions{}) if err != nil && apierrors.IsConflict(err) { return false, nil } @@ -389,7 +389,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { ginkgo.By("Then the pod is released") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for _, owner := range p2.OwnerReferences { if *owner.Controller && 
owner.UID == rs.UID { @@ -403,7 +403,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { framework.ExpectNoError(err) } -func testRSScaleSubresources(f *framework.Framework) { +func testRSScaleSubresources(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -417,15 +417,15 @@ func testRSScaleSubresources(f *framework.Framework) { replicas := int32(1) ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", rsName)) rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) - _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) + _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the required pods have come up. - err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) + err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err) ginkgo.By("getting scale subresource") - scale, err := c.AppsV1().ReplicaSets(ns).GetScale(context.TODO(), rsName, metav1.GetOptions{}) + scale, err := c.AppsV1().ReplicaSets(ns).GetScale(ctx, rsName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } @@ -435,14 +435,14 @@ func testRSScaleSubresources(f *framework.Framework) { ginkgo.By("updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = 2 - scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(context.TODO(), rsName, scale, metav1.UpdateOptions{}) + scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(ctx, rsName, scale, metav1.UpdateOptions{}) if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) ginkgo.By("verifying the replicaset Spec.Replicas was modified") - rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) + rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get statefulset resource: %v", err) } @@ -458,17 +458,17 @@ func testRSScaleSubresources(f *framework.Framework) { }) framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") - _, err = c.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, []byte(rsScalePatchPayload), metav1.PatchOptions{}, "scale") + _, err = c.AppsV1().ReplicaSets(ns).Patch(ctx, rsName, types.StrategicMergePatchType, []byte(rsScalePatchPayload), metav1.PatchOptions{}, "scale") framework.ExpectNoError(err, "Failed to patch replicaset: %v", err) - rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) + rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err) framework.ExpectEqual(*(rs.Spec.Replicas), int32(4), "replicaset should have 4 replicas") } // ReplicaSet Replace and Patch tests -func testRSLifeCycle(f *framework.Framework) { +func testRSLifeCycle(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet zero := int64(0) @@ -489,18 +489,18 @@ func testRSLifeCycle(f *framework.Framework) { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = label - return 
f.ClientSet.AppsV1().ReplicaSets(ns).Watch(context.TODO(), options) + return f.ClientSet.AppsV1().ReplicaSets(ns).Watch(ctx, options) }, } - rsList, err := f.ClientSet.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: label}) + rsList, err := f.ClientSet.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: label}) framework.ExpectNoError(err, "failed to list rsList") // Create a ReplicaSet rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) - _, err = c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) + _, err = c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{}) framework.ExpectNoError(err) // Verify that the required pods have come up. - err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) + err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) framework.ExpectNoError(err, "Failed to create pods: %s", err) // Scale the ReplicaSet @@ -531,12 +531,12 @@ func testRSLifeCycle(f *framework.Framework) { }, }) framework.ExpectNoError(err, "failed to Marshal ReplicaSet JSON patch") - _, err = f.ClientSet.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, []byte(rsPatch), metav1.PatchOptions{}) + _, err = f.ClientSet.AppsV1().ReplicaSets(ns).Patch(ctx, rsName, types.StrategicMergePatchType, []byte(rsPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch ReplicaSet") - ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart) + ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() - _, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if rset, ok := event.Object.(*appsv1.ReplicaSet); ok { found := rset.ObjectMeta.Name == rsName && rset.ObjectMeta.Labels["test-rs"] == "patched" && @@ -558,7 +558,7 @@ func testRSLifeCycle(f *framework.Framework) { } // List and DeleteCollection operations -func listRSDeleteCollection(f *framework.Framework) { +func listRSDeleteCollection(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -577,32 +577,32 @@ func listRSDeleteCollection(f *framework.Framework) { ginkgo.By("Create a ReplicaSet") rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) - _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) + _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Verify that the required pods have come up") - err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) + err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) framework.ExpectNoError(err, "Failed to create pods: %s", err) - r, err := rsClient.Get(context.TODO(), rsName, metav1.GetOptions{}) + r, err := rsClient.Get(ctx, rsName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get ReplicaSets") framework.Logf("Replica Status: %+v", r.Status) ginkgo.By("Listing all ReplicaSets") - rsList, err := c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) + rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) framework.ExpectNoError(err, "failed to list ReplicaSets") framework.ExpectEqual(len(rsList.Items), 1, "filtered list wasn't found") 
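Watches follow the same rule as polls: the cache.ListWatch closure above captures the spec's ctx for Watch(), and watchtools.Until runs under a timeout context derived from it, so either an abort or the pod-start timeout ends the wait. Roughly (mirroring the hunks above; the condition is simplified):

	w := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.LabelSelector = label
			return c.AppsV1().ReplicaSets(ns).Watch(ctx, options) // spec context, not context.TODO()
		},
	}
	ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
	defer cancel()
	_, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
		rs, ok := event.Object.(*appsv1.ReplicaSet)
		return ok && rs.ObjectMeta.Labels["test-rs"] == "patched", nil // simplified condition
	})
	framework.ExpectNoError(err, "failed to observe patched ReplicaSet")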
ginkgo.By("DeleteCollection of the ReplicaSets") - err = rsClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) + err = rsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) framework.ExpectNoError(err, "failed to delete ReplicaSets") ginkgo.By("After DeleteCollection verify that ReplicaSets have been deleted") - rsList, err = c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) + rsList, err = c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) framework.ExpectNoError(err, "failed to list ReplicaSets") framework.ExpectEqual(len(rsList.Items), 0, "filtered list should have no replicas") } -func testRSStatus(f *framework.Framework) { +func testRSStatus(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet rsClient := c.AppsV1().ReplicaSets(ns) @@ -620,24 +620,24 @@ func testRSStatus(f *framework.Framework) { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector - return rsClient.Watch(context.TODO(), options) + return rsClient.Watch(ctx, options) }, } - rsList, err := c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Replicasets") ginkgo.By("Create a Replicaset") rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) - testReplicaSet, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) + testReplicaSet, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Verify that the required pods have come up.") - err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) + err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) framework.ExpectNoError(err, "Failed to create pods: %s", err) ginkgo.By("Getting /status") rsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"} - rsStatusUnstructured, err := f.DynamicClient.Resource(rsResource).Namespace(ns).Get(context.TODO(), rsName, metav1.GetOptions{}, "status") + rsStatusUnstructured, err := f.DynamicClient.Resource(rsResource).Namespace(ns).Get(ctx, rsName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "Failed to fetch the status of replicaset %s in namespace %s", rsName, ns) rsStatusBytes, err := json.Marshal(rsStatusUnstructured) framework.ExpectNoError(err, "Failed to marshal unstructured response. 
%v", err) @@ -651,7 +651,7 @@ func testRSStatus(f *framework.Framework) { var statusToUpdate, updatedStatus *appsv1.ReplicaSet err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - statusToUpdate, err = rsClient.Get(context.TODO(), rsName, metav1.GetOptions{}) + statusToUpdate, err = rsClient.Get(ctx, rsName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to retrieve replicaset %s", rsName) statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.ReplicaSetCondition{ @@ -661,16 +661,16 @@ func testRSStatus(f *framework.Framework) { Message: "Set from e2e test", }) - updatedStatus, err = rsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err = rsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "Failed to update status. %v", err) framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) ginkgo.By("watching for the ReplicaSet status to be updated") - ctx, cancel := context.WithTimeout(context.Background(), rsRetryTimeout) + ctxUntil, cancel := context.WithTimeout(ctx, rsRetryTimeout) defer cancel() - _, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if rs, ok := event.Object.(*appsv1.ReplicaSet); ok { found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name && rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace && @@ -701,14 +701,14 @@ func testRSStatus(f *framework.Framework) { payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`) framework.Logf("Patch payload: %v", string(payload)) - patchedReplicaSet, err := rsClient.Patch(context.TODO(), rsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") + patchedReplicaSet, err := rsClient.Patch(ctx, rsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Failed to patch status. 
%v", err) framework.Logf("Patched status conditions: %#v", patchedReplicaSet.Status.Conditions) ginkgo.By("watching for the Replicaset status to be patched") - ctx, cancel = context.WithTimeout(context.Background(), rsRetryTimeout) + ctxUntil, cancel = context.WithTimeout(ctx, rsRetryTimeout) defer cancel() - _, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if rs, ok := event.Object.(*appsv1.ReplicaSet); ok { found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name && rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace && diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index ed83fa6d11e..55ca49af3ce 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -110,82 +110,82 @@ var _ = SIGDescribe("StatefulSet", func() { var statefulPodMounts, podMounts []v1.VolumeMount var ss *appsv1.StatefulSet - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}} ss = e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) - _, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + _, err := c.CoreV1().Services(ns).Create(ctx, headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() { - e2eoutput.DumpDebugInfo(c, ns) + e2eoutput.DumpDebugInfo(ctx, c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) - e2estatefulset.DeleteAllStatefulSets(c, ns) + e2estatefulset.DeleteAllStatefulSets(ctx, c, ns) }) // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. 
ginkgo.It("should provide basic identity", func(ctx context.Context) { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) *(ss.Spec.Replicas) = 3 e2estatefulset.PauseNewPods(ss) - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + ss.Name) - e2estatefulset.Saturate(c, ss) + e2estatefulset.Saturate(ctx, c, ss) ginkgo.By("Verifying statefulset mounted data directory is usable") - framework.ExpectNoError(e2estatefulset.CheckMount(c, ss, "/data")) + framework.ExpectNoError(e2estatefulset.CheckMount(ctx, c, ss, "/data")) ginkgo.By("Verifying statefulset provides a stable hostname for each pod") - framework.ExpectNoError(e2estatefulset.CheckHostname(c, ss)) + framework.ExpectNoError(e2estatefulset.CheckHostname(ctx, c, ss)) ginkgo.By("Verifying statefulset set proper service name") framework.ExpectNoError(e2estatefulset.CheckServiceName(ss, headlessSvcName)) cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync" ginkgo.By("Running " + cmd + " in all stateful pods") - framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(c, ss, cmd)) + framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(ctx, c, ss, cmd)) ginkgo.By("Restarting statefulset " + ss.Name) - e2estatefulset.Restart(c, ss) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.Restart(ctx, c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) ginkgo.By("Verifying statefulset mounted data directory is usable") - framework.ExpectNoError(e2estatefulset.CheckMount(c, ss, "/data")) + framework.ExpectNoError(e2estatefulset.CheckMount(ctx, c, ss, "/data")) cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi" ginkgo.By("Running " + cmd + " in all stateful pods") - framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(c, ss, cmd)) + framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(ctx, c, ss, cmd)) }) // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. ginkgo.It("should adopt matching orphans and release non-matching pods", func(ctx context.Context) { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) *(ss.Spec.Replicas) = 1 e2estatefulset.PauseNewPods(ss) // Replace ss with the one returned from Create() so it has the UID. // Save Kind since it won't be populated in the returned ss. 
kind := ss.Kind - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ss.Kind = kind ginkgo.By("Saturating stateful set " + ss.Name) - e2estatefulset.Saturate(c, ss) - pods := e2estatefulset.GetPodList(c, ss) + e2estatefulset.Saturate(ctx, c, ss) + pods := e2estatefulset.GetPodList(ctx, c, ss) gomega.Expect(pods.Items).To(gomega.HaveLen(int(*ss.Spec.Replicas))) ginkgo.By("Checking that stateful set pods are created with ControllerRef") @@ -197,12 +197,12 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectEqual(controllerRef.UID, ss.UID) ginkgo.By("Orphaning one of the stateful set's pods") - e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) { pod.OwnerReferences = nil }) ginkgo.By("Checking that the stateful set readopts the pod") - gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", statefulSetTimeout, + gomega.Expect(e2epod.WaitForPodCondition(ctx, c, pod.Namespace, pod.Name, "adopted", statefulSetTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { @@ -217,12 +217,12 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Removing the labels from one of the stateful set's pods") prevLabels := pod.Labels - e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) { pod.Labels = nil }) ginkgo.By("Checking that the stateful set releases the pod") - gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", statefulSetTimeout, + gomega.Expect(e2epod.WaitForPodCondition(ctx, c, pod.Namespace, pod.Name, "released", statefulSetTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef != nil { @@ -234,12 +234,12 @@ var _ = SIGDescribe("StatefulSet", func() { // If we don't do this, the test leaks the Pod and PVC. ginkgo.By("Readding labels to the stateful set's pod") - e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) { pod.Labels = prevLabels }) ginkgo.By("Checking that the stateful set readopts the pod") - gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", statefulSetTimeout, + gomega.Expect(e2epod.WaitForPodCondition(ctx, c, pod.Namespace, pod.Name, "adopted", statefulSetTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { @@ -257,45 +257,45 @@ var _ = SIGDescribe("StatefulSet", func() { // StorageClass and a dynamic provisioner. 
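Condition helpers change shape the same way: instead of returning a bare func() (bool, error), they return a func(ctx context.Context) (bool, error) that fits wait.ConditionWithContextFunc, as checkReplicationControllerStatusReplicaCount does earlier in this patch. An illustrative helper in the same shape (not taken from the patch; assumes the usual clientset/metav1 imports):

	// statefulSetHasReadyReplicas builds a context-aware condition for the
	// *WithContext pollers; the ctx it receives comes from the poller itself.
	func statefulSetHasReadyReplicas(c clientset.Interface, ns, name string, want int32) func(ctx context.Context) (bool, error) {
		return func(ctx context.Context) (bool, error) {
			ss, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			return ss.Status.ReadyReplicas == want, nil
		}
	}

	// Usage: err := wait.PollImmediateWithContext(ctx, time.Second, time.Minute,
	//	statefulSetHasReadyReplicas(c, ns, ssName, 3))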
ginkgo.It("should not deadlock when a pod's predecessor fails", func(ctx context.Context) { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) *(ss.Spec.Replicas) = 2 e2estatefulset.PauseNewPods(ss) - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunning(c, 1, 0, ss) + e2estatefulset.WaitForRunning(ctx, c, 1, 0, ss) ginkgo.By("Resuming stateful pod at index 0.") - e2estatefulset.ResumeNextPod(c, ss) + e2estatefulset.ResumeNextPod(ctx, c, ss) ginkgo.By("Waiting for stateful pod at index 1 to enter running.") - e2estatefulset.WaitForRunning(c, 2, 1, ss) + e2estatefulset.WaitForRunning(ctx, c, 2, 1, ss) // Now we have 1 healthy and 1 unhealthy stateful pod. Deleting the healthy stateful pod should *not* // create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till // we set the healthy bit. ginkgo.By("Deleting healthy stateful pod at index 0.") - deleteStatefulPodAtIndex(c, 0, ss) + deleteStatefulPodAtIndex(ctx, c, 0, ss) ginkgo.By("Confirming stateful pod at index 0 is recreated.") - e2estatefulset.WaitForRunning(c, 2, 1, ss) + e2estatefulset.WaitForRunning(ctx, c, 2, 1, ss) ginkgo.By("Resuming stateful pod at index 1.") - e2estatefulset.ResumeNextPod(c, ss) + e2estatefulset.ResumeNextPod(ctx, c, ss) ginkgo.By("Confirming all stateful pods in statefulset are created.") - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) }) // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. 
ginkgo.It("should perform rolling updates and roll backs of template modifications with PVCs", func(ctx context.Context) { ginkgo.By("Creating a new StatefulSet with PVCs") - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) *(ss.Spec.Replicas) = 3 - rollbackTest(c, ns, ss) + rollbackTest(ctx, c, ns, ss) }) /* @@ -306,7 +306,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func(ctx context.Context) { ginkgo.By("Creating a new StatefulSet") ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) - rollbackTest(c, ns, ss) + rollbackTest(ctx, c, ns, ss) }) /* @@ -328,14 +328,14 @@ var _ = SIGDescribe("StatefulSet", func() { }()} }(), } - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) - ss = waitForStatus(c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) + ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", ss.Namespace, ss.Name, updateRevision, currentRevision)) - pods := e2estatefulset.GetPodList(c, ss) + pods := e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s", pods.Items[i].Namespace, @@ -348,13 +348,13 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage)) framework.ExpectNotEqual(oldImage, newImage, "Incorrect test setup: should update to a different image") - ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) framework.ExpectNoError(err) ginkgo.By("Creating a new revision") - ss = waitForStatus(c, ss) + ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update") @@ -383,7 +383,7 @@ var _ = SIGDescribe("StatefulSet", func() { }()} }(), } - ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { update.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ Type: appsv1.RollingUpdateStatefulSetStrategyType, RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy { @@ -396,7 +396,7 @@ var _ = SIGDescribe("StatefulSet", func() { } }) framework.ExpectNoError(err) - ss, pods = waitForPartitionedRollingUpdate(c, ss) + ss, pods = waitForPartitionedRollingUpdate(ctx, c, ss) for i := range pods.Items { if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image 
%s", @@ -424,11 +424,11 @@ var _ = SIGDescribe("StatefulSet", func() { } ginkgo.By("Restoring Pods to the correct revision when they are deleted") - deleteStatefulPodAtIndex(c, 0, ss) - deleteStatefulPodAtIndex(c, 2, ss) - e2estatefulset.WaitForRunningAndReady(c, 3, ss) - ss = getStatefulSet(c, ss.Namespace, ss.Name) - pods = e2estatefulset.GetPodList(c, ss) + deleteStatefulPodAtIndex(ctx, c, 0, ss) + deleteStatefulPodAtIndex(ctx, c, 2, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, 3, ss) + ss = getStatefulSet(ctx, c, ss.Namespace, ss.Name) + pods = e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s", @@ -457,7 +457,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Performing a phased rolling update") for i := int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) - 1; i >= 0; i-- { - ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { update.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ Type: appsv1.RollingUpdateStatefulSetStrategyType, RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy { @@ -469,7 +469,7 @@ var _ = SIGDescribe("StatefulSet", func() { } }) framework.ExpectNoError(err) - ss, pods = waitForPartitionedRollingUpdate(c, ss) + ss, pods = waitForPartitionedRollingUpdate(ctx, c, ss) for i := range pods.Items { if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s", @@ -513,14 +513,14 @@ var _ = SIGDescribe("StatefulSet", func() { ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ Type: appsv1.OnDeleteStatefulSetStrategyType, } - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) - ss = waitForStatus(c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) + ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", ss.Namespace, ss.Name, updateRevision, currentRevision)) - pods := e2estatefulset.GetPodList(c, ss) + pods := e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, @@ -530,12 +530,12 @@ var _ = SIGDescribe("StatefulSet", func() { } ginkgo.By("Restoring Pods to the current revision") - deleteStatefulPodAtIndex(c, 0, ss) - deleteStatefulPodAtIndex(c, 1, ss) - deleteStatefulPodAtIndex(c, 2, ss) - e2estatefulset.WaitForRunningAndReady(c, 3, ss) - ss = getStatefulSet(c, ss.Namespace, ss.Name) - pods = e2estatefulset.GetPodList(c, ss) + deleteStatefulPodAtIndex(ctx, c, 0, ss) + deleteStatefulPodAtIndex(ctx, c, 1, ss) + deleteStatefulPodAtIndex(ctx, c, 2, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, 
3, ss) + ss = getStatefulSet(ctx, c, ss.Namespace, ss.Name) + pods = e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, @@ -548,23 +548,23 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage)) framework.ExpectNotEqual(oldImage, newImage, "Incorrect test setup: should update to a different image") - ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) framework.ExpectNoError(err) ginkgo.By("Creating a new revision") - ss = waitForStatus(c, ss) + ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update") ginkgo.By("Recreating Pods at the new revision") - deleteStatefulPodAtIndex(c, 0, ss) - deleteStatefulPodAtIndex(c, 1, ss) - deleteStatefulPodAtIndex(c, 2, ss) - e2estatefulset.WaitForRunningAndReady(c, 3, ss) - ss = getStatefulSet(c, ss.Namespace, ss.Name) - pods = e2estatefulset.GetPodList(c, ss) + deleteStatefulPodAtIndex(ctx, c, 0, ss) + deleteStatefulPodAtIndex(ctx, c, 1, ss) + deleteStatefulPodAtIndex(ctx, c, 2, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, 3, ss) + ss = getStatefulSet(ctx, c, ss.Namespace, ss.Name) + pods = e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", pods.Items[i].Namespace, @@ -589,11 +589,11 @@ var _ = SIGDescribe("StatefulSet", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.LabelSelector = psLabels.AsSelector().String() - return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options) }, } ginkgo.By("Initializing watcher for selector " + psLabels.String()) - pl, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ + pl, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{ LabelSelector: psLabels.AsSelector().String(), }) framework.ExpectNoError(err) @@ -607,7 +607,7 @@ var _ = SIGDescribe("StatefulSet", func() { defer wg.Done() expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"} - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), statefulSetTimeout) + ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, statefulSetTimeout) defer cancel() _, orderErr = watchtools.Until(ctx, pl.ResourceVersion, w, func(event watch.Event) (bool, error) { @@ -625,29 +625,29 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) setHTTPProbe(ss) - ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until all stateful set " + 
ssName + " replicas will be running in namespace " + ns) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) ginkgo.By("Confirming that stateful set scale up will halt with unhealthy stateful pod") - breakHTTPProbe(c, ss) - waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss) - e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0) - e2estatefulset.UpdateReplicas(c, ss, 3) - confirmStatefulPodCount(c, 1, ss, 10*time.Second, true) + breakHTTPProbe(ctx, c, ss) + waitForRunningAndNotReady(ctx, c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForStatusReadyReplicas(ctx, c, ss, 0) + e2estatefulset.UpdateReplicas(ctx, c, ss, 3) + confirmStatefulPodCount(ctx, c, 1, ss, 10*time.Second, true) ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns) - restoreHTTPProbe(c, ss) - e2estatefulset.WaitForRunningAndReady(c, 3, ss) + restoreHTTPProbe(ctx, c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, 3, ss) ginkgo.By("Verifying that stateful set " + ssName + " was scaled up in order") wg.Wait() framework.ExpectNoError(orderErr) ginkgo.By("Scale down will halt with unhealthy stateful pod") - pl, err = f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ + pl, err = f.ClientSet.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{ LabelSelector: psLabels.AsSelector().String(), }) framework.ExpectNoError(err) @@ -659,7 +659,7 @@ var _ = SIGDescribe("StatefulSet", func() { defer wg.Done() expectedOrder := []string{ssName + "-2", ssName + "-1", ssName + "-0"} - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), statefulSetTimeout) + ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, statefulSetTimeout) defer cancel() _, orderErr = watchtools.Until(ctx, pl.ResourceVersion, w, func(event watch.Event) (bool, error) { @@ -674,15 +674,15 @@ var _ = SIGDescribe("StatefulSet", func() { }) }() - breakHTTPProbe(c, ss) - e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0) - waitForRunningAndNotReady(c, 3, ss) - e2estatefulset.UpdateReplicas(c, ss, 0) - confirmStatefulPodCount(c, 3, ss, 10*time.Second, true) + breakHTTPProbe(ctx, c, ss) + e2estatefulset.WaitForStatusReadyReplicas(ctx, c, ss, 0) + waitForRunningAndNotReady(ctx, c, 3, ss) + e2estatefulset.UpdateReplicas(ctx, c, ss, 0) + confirmStatefulPodCount(ctx, c, 3, ss, 10*time.Second, true) ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns) - restoreHTTPProbe(c, ss) - e2estatefulset.Scale(c, ss, 0) + restoreHTTPProbe(ctx, c, ss) + e2estatefulset.Scale(ctx, c, ss, 0) ginkgo.By("Verifying that stateful set " + ssName + " was scaled down in reverse order") wg.Wait() @@ -701,34 +701,34 @@ var _ = SIGDescribe("StatefulSet", func() { ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) ginkgo.By("Confirming that stateful set scale up will not halt with 
unhealthy stateful pod") - breakHTTPProbe(c, ss) - waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss) - e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0) - e2estatefulset.UpdateReplicas(c, ss, 3) - confirmStatefulPodCount(c, 3, ss, 10*time.Second, false) + breakHTTPProbe(ctx, c, ss) + waitForRunningAndNotReady(ctx, c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForStatusReadyReplicas(ctx, c, ss, 0) + e2estatefulset.UpdateReplicas(ctx, c, ss, 3) + confirmStatefulPodCount(ctx, c, 3, ss, 10*time.Second, false) ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns) - restoreHTTPProbe(c, ss) - e2estatefulset.WaitForRunningAndReady(c, 3, ss) + restoreHTTPProbe(ctx, c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, 3, ss) ginkgo.By("Scale down will not halt with unhealthy stateful pod") - breakHTTPProbe(c, ss) - e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0) - waitForRunningAndNotReady(c, 3, ss) - e2estatefulset.UpdateReplicas(c, ss, 0) - confirmStatefulPodCount(c, 0, ss, 10*time.Second, false) + breakHTTPProbe(ctx, c, ss) + e2estatefulset.WaitForStatusReadyReplicas(ctx, c, ss, 0) + waitForRunningAndNotReady(ctx, c, 3, ss) + e2estatefulset.UpdateReplicas(ctx, c, ss, 0) + confirmStatefulPodCount(ctx, c, 0, ss, 10*time.Second, false) ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns) - restoreHTTPProbe(c, ss) - e2estatefulset.Scale(c, ss, 0) - e2estatefulset.WaitForStatusReplicas(c, ss, 0) + restoreHTTPProbe(ctx, c, ss) + e2estatefulset.Scale(ctx, c, ss, 0) + e2estatefulset.WaitForStatusReplicas(ctx, c, ss, 0) }) /* @@ -740,7 +740,7 @@ var _ = SIGDescribe("StatefulSet", func() { podName := "test-pod" statefulPodName := ssName + "-0" ginkgo.By("Looking for a node to schedule stateful set and pod") - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) ginkgo.By("Creating pod with conflicting port in namespace " + f.Namespace.Name) @@ -760,10 +760,10 @@ var _ = SIGDescribe("StatefulSet", func() { NodeName: node.Name, }, } - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) - if err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name); err != nil { + if err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name); err != nil { framework.Failf("Pod %v did not start running: %v", podName, err) } @@ -772,14 +772,14 @@ var _ = SIGDescribe("StatefulSet", func() { statefulPodContainer := &ss.Spec.Template.Spec.Containers[0] statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort) ss.Spec.Template.Spec.NodeName = node.Name - _, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) var initialStatefulPodUID types.UID ginkgo.By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) fieldSelector := 
fields.OneTermEqualSelector("metadata.name", statefulPodName).String() - pl, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ + pl, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{ FieldSelector: fieldSelector, }) framework.ExpectNoError(err) @@ -793,7 +793,7 @@ var _ = SIGDescribe("StatefulSet", func() { lw := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.FieldSelector = fieldSelector - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(ctx, options) }, } ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, statefulPodTimeout) @@ -819,13 +819,13 @@ var _ = SIGDescribe("StatefulSet", func() { } ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") // we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry - gomega.Eventually(func() error { - statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), statefulPodName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() error { + statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, statefulPodName, metav1.GetOptions{}) if err != nil { return err } @@ -849,13 +849,13 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) - waitForStatus(c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) + waitForStatus(ctx, c, ss) ginkgo.By("getting scale subresource") - scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{}) + scale, err := c.AppsV1().StatefulSets(ns).GetScale(ctx, ssName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } @@ -865,14 +865,14 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = 2 - scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale, metav1.UpdateOptions{}) + scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ctx, ssName, scale, metav1.UpdateOptions{}) if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) ginkgo.By("verifying the statefulset Spec.Replicas was modified") - ss, err = c.AppsV1().StatefulSets(ns).Get(context.TODO(), ssName, metav1.GetOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get statefulset resource: %v", err) 
} @@ -888,11 +888,11 @@ var _ = SIGDescribe("StatefulSet", func() { }) framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") - _, err = c.AppsV1().StatefulSets(ns).Patch(context.TODO(), ssName, types.StrategicMergePatchType, []byte(ssScalePatchPayload), metav1.PatchOptions{}, "scale") + _, err = c.AppsV1().StatefulSets(ns).Patch(ctx, ssName, types.StrategicMergePatchType, []byte(ssScalePatchPayload), metav1.PatchOptions{}, "scale") framework.ExpectNoError(err, "Failed to patch stateful set: %v", err) ginkgo.By("verifying the statefulset Spec.Replicas was modified") - ss, err = c.AppsV1().StatefulSets(ns).Get(context.TODO(), ssName, metav1.GetOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get statefulset resource: %v", err) framework.ExpectEqual(*(ss.Spec.Replicas), int32(4), "statefulset should have 4 replicas") }) @@ -919,10 +919,10 @@ var _ = SIGDescribe("StatefulSet", func() { } ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, ssPodLabels) setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) - waitForStatus(c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) + waitForStatus(ctx, c, ss) ginkgo.By("patching the StatefulSet") ssPatch, err := json.Marshal(map[string]interface{}{ @@ -943,26 +943,26 @@ var _ = SIGDescribe("StatefulSet", func() { }, }) framework.ExpectNoError(err, "failed to Marshal StatefulSet JSON patch") - _, err = f.ClientSet.AppsV1().StatefulSets(ns).Patch(context.TODO(), ssName, types.StrategicMergePatchType, []byte(ssPatch), metav1.PatchOptions{}) + _, err = f.ClientSet.AppsV1().StatefulSets(ns).Patch(ctx, ssName, types.StrategicMergePatchType, []byte(ssPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch Set") - ss, err = c.AppsV1().StatefulSets(ns).Get(context.TODO(), ssName, metav1.GetOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get statefulset resource: %v", err) framework.ExpectEqual(*(ss.Spec.Replicas), ssPatchReplicas, "statefulset should have 2 replicas") framework.ExpectEqual(ss.Spec.Template.Spec.Containers[0].Image, ssPatchImage, "statefulset not using ssPatchImage. 
Is using %v", ss.Spec.Template.Spec.Containers[0].Image) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) - waitForStatus(c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) + waitForStatus(ctx, c, ss) ginkgo.By("Listing all StatefulSets") - ssList, err := c.AppsV1().StatefulSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-ss=patched"}) + ssList, err := c.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{LabelSelector: "test-ss=patched"}) framework.ExpectNoError(err, "failed to list StatefulSets") framework.ExpectEqual(len(ssList.Items), 1, "filtered list wasn't found") ginkgo.By("Delete all of the StatefulSets") - err = c.AppsV1().StatefulSets(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "test-ss=patched"}) + err = c.AppsV1().StatefulSets(ns).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "test-ss=patched"}) framework.ExpectNoError(err, "failed to delete StatefulSets") ginkgo.By("Verify that StatefulSets have been deleted") - ssList, err = c.AppsV1().StatefulSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-ss=patched"}) + ssList, err = c.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{LabelSelector: "test-ss=patched"}) framework.ExpectNoError(err, "failed to list StatefulSets") framework.ExpectEqual(len(ssList.Items), 0, "filtered list should have no Statefulsets") }) @@ -981,28 +981,28 @@ var _ = SIGDescribe("StatefulSet", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector - return ssClient.Watch(context.TODO(), options) + return ssClient.Watch(ctx, options) }, } - ssList, err := c.AppsV1().StatefulSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + ssList, err := c.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list StatefulSets") ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) setHTTPProbe(ss) - ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) - waitForStatus(c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) + waitForStatus(ctx, c, ss) ginkgo.By("Patch Statefulset to include a label") payload := []byte(`{"metadata":{"labels":{"e2e":"testing"}}}`) - ss, err = ssClient.Patch(context.TODO(), ssName, types.StrategicMergePatchType, payload, metav1.PatchOptions{}) + ss, err = ssClient.Patch(ctx, ssName, types.StrategicMergePatchType, payload, metav1.PatchOptions{}) framework.ExpectNoError(err) ginkgo.By("Getting /status") ssResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"} - ssStatusUnstructured, err := f.DynamicClient.Resource(ssResource).Namespace(ns).Get(context.TODO(), ssName, metav1.GetOptions{}, "status") + ssStatusUnstructured, err := f.DynamicClient.Resource(ssResource).Namespace(ns).Get(ctx, ssName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "Failed to fetch the status of replica set %s in namespace %s", ssName, ns) ssStatusBytes, err := 
json.Marshal(ssStatusUnstructured) framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err) @@ -1016,7 +1016,7 @@ var _ = SIGDescribe("StatefulSet", func() { var statusToUpdate, updatedStatus *appsv1.StatefulSet err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - statusToUpdate, err = ssClient.Get(context.TODO(), ssName, metav1.GetOptions{}) + statusToUpdate, err = ssClient.Get(ctx, ssName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to retrieve statefulset %s", ssName) statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.StatefulSetCondition{ @@ -1026,7 +1026,7 @@ var _ = SIGDescribe("StatefulSet", func() { Message: "Set from e2e test", }) - updatedStatus, err = ssClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err = ssClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "Failed to update status. %v", err) @@ -1034,10 +1034,10 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("watching for the statefulset status to be updated") - ctx, cancel := context.WithTimeout(ctx, statefulSetTimeout) + ctxUntil, cancel := context.WithTimeout(ctx, statefulSetTimeout) defer cancel() - _, err = watchtools.Until(ctx, ssList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, ssList.ResourceVersion, w, func(event watch.Event) (bool, error) { if e, ok := event.Object.(*appsv1.StatefulSet); ok { found := e.ObjectMeta.Name == ss.ObjectMeta.Name && @@ -1068,14 +1068,14 @@ var _ = SIGDescribe("StatefulSet", func() { payload = []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`) framework.Logf("Patch payload: %v", string(payload)) - patchedStatefulSet, err := ssClient.Patch(context.TODO(), ssName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") + patchedStatefulSet, err := ssClient.Patch(ctx, ssName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Failed to patch status. %v", err) framework.Logf("Patched status conditions: %#v", patchedStatefulSet.Status.Conditions) ginkgo.By("watching for the Statefulset status to be patched") - ctx, cancel = context.WithTimeout(context.Background(), statefulSetTimeout) + ctxUntil, cancel = context.WithTimeout(ctx, statefulSetTimeout) - _, err = watchtools.Until(ctx, ssList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, ssList.ResourceVersion, w, func(event watch.Event) (bool, error) { defer cancel() if e, ok := event.Object.(*appsv1.StatefulSet); ok { @@ -1104,48 +1104,48 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.Describe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() { var appTester *clusterAppTester - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { appTester = &clusterAppTester{client: c, ns: ns} }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() { - e2eoutput.DumpDebugInfo(c, ns) + e2eoutput.DumpDebugInfo(ctx, c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) - e2estatefulset.DeleteAllStatefulSets(c, ns) + e2estatefulset.DeleteAllStatefulSets(ctx, c, ns) }) // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. 
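// Illustrative sketch (not part of the patch) of the watch pattern in the status hunks
// above: a child context with its own timeout is derived from the Ginkgo ctx rather
// than context.Background(), and it gets a distinct name (ctxUntil) so it does not
// shadow ctx. The ListWatch, resource version, name and timeout are placeholders.
package example

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

func waitForStatefulSetEvent(ctx context.Context, w *cache.ListWatch, resourceVersion, name string) error {
	// Deriving from ctx keeps the overall spec deadline: cancelling the spec also
	// cancels the watch, while the timeout only bounds this particular wait.
	ctxUntil, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()

	_, err := watchtools.Until(ctxUntil, resourceVersion, w, func(event watch.Event) (bool, error) {
		ss, ok := event.Object.(*appsv1.StatefulSet)
		return ok && ss.Name == name, nil
	})
	return err
}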
ginkgo.It("should creating a working zookeeper cluster", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) appTester.statefulPod = &zookeeperTester{client: c} - appTester.run() + appTester.run(ctx) }) // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. ginkgo.It("should creating a working redis cluster", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) appTester.statefulPod = &redisTester{client: c} - appTester.run() + appTester.run(ctx) }) // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. ginkgo.It("should creating a working mysql cluster", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) appTester.statefulPod = &mysqlGaleraTester{client: c} - appTester.run() + appTester.run(ctx) }) // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. ginkgo.It("should creating a working CockroachDB cluster", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) appTester.statefulPod = &cockroachDBTester{client: c} - appTester.run() + appTester.run(ctx) }) }) @@ -1161,9 +1161,9 @@ var _ = SIGDescribe("StatefulSet", func() { } ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, ssPodLabels) setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 1) + e2estatefulset.WaitForStatusAvailableReplicas(ctx, c, ss, 1) }) ginkgo.It("AvailableReplicas should get updated accordingly when MinReadySeconds is enabled", func(ctx context.Context) { @@ -1177,30 +1177,30 @@ var _ = SIGDescribe("StatefulSet", func() { ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, nil, nil, ssPodLabels) ss.Spec.MinReadySeconds = 30 setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 0) + e2estatefulset.WaitForStatusAvailableReplicas(ctx, c, ss, 0) // let's check that the availableReplicas have still not updated time.Sleep(5 * time.Second) - ss, err = c.AppsV1().StatefulSets(ns).Get(context.TODO(), ss.Name, metav1.GetOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ss.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if ss.Status.AvailableReplicas != 0 { framework.Failf("invalid number of availableReplicas: expected=%v received=%v", 0, ss.Status.AvailableReplicas) } - e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2) + e2estatefulset.WaitForStatusAvailableReplicas(ctx, c, ss, 2) - ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { update.Spec.MinReadySeconds = 3600 }) framework.ExpectNoError(err) // We don't expect replicas to be updated till 1 hour, so the availableReplicas should be 0 - e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 0) + 
e2estatefulset.WaitForStatusAvailableReplicas(ctx, c, ss, 0) - ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { update.Spec.MinReadySeconds = 0 }) framework.ExpectNoError(err) - e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2) + e2estatefulset.WaitForStatusAvailableReplicas(ctx, c, ss, 2) ginkgo.By("check availableReplicas are shown in status") out, err := e2ekubectl.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml") @@ -1220,83 +1220,83 @@ var _ = SIGDescribe("StatefulSet", func() { var statefulPodMounts, podMounts []v1.VolumeMount var ss *appsv1.StatefulSet - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}} ss = e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) - _, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + _, err := c.CoreV1().Services(ns).Create(ctx, headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() { - e2eoutput.DumpDebugInfo(c, ns) + e2eoutput.DumpDebugInfo(ctx, c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) - e2estatefulset.DeleteAllStatefulSets(c, ns) + e2estatefulset.DeleteAllStatefulSets(ctx, c, ns) }) ginkgo.It("should delete PVCs with a WhenDeleted policy", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType, } - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Confirming all 3 PVCs exist with their owner refs") - err = verifyStatefulSetPVCsExistWithOwnerRefs(c, ss, []int{0, 1, 2}, true, false) + err = verifyStatefulSetPVCsExistWithOwnerRefs(ctx, c, ss, []int{0, 1, 2}, true, false) framework.ExpectNoError(err) ginkgo.By("Deleting stateful set " + ss.Name) - err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{}) + err = c.AppsV1().StatefulSets(ns).Delete(ctx, ss.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Verifying PVCs deleted") - err = verifyStatefulSetPVCsExist(c, ss, []int{}) + err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{}) framework.ExpectNoError(err) }) ginkgo.It("should delete PVCs with a OnScaledown policy", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ WhenScaled: appsv1.DeletePersistentVolumeClaimRetentionPolicyType, } - _, err := 
c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Confirming all 3 PVCs exist") - err = verifyStatefulSetPVCsExist(c, ss, []int{0, 1, 2}) + err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0, 1, 2}) framework.ExpectNoError(err) ginkgo.By("Scaling stateful set " + ss.Name + " to one replica") - ss, err = e2estatefulset.Scale(c, ss, 1) + ss, err = e2estatefulset.Scale(ctx, c, ss, 1) framework.ExpectNoError(err) ginkgo.By("Verifying all but one PVC deleted") - err = verifyStatefulSetPVCsExist(c, ss, []int{0}) + err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0}) framework.ExpectNoError(err) }) ginkgo.It("should delete PVCs after adopting pod (WhenDeleted)", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType, } - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Confirming all 3 PVCs exist with their owner refs") - err = verifyStatefulSetPVCsExistWithOwnerRefs(c, ss, []int{0, 1, 2}, true, false) + err = verifyStatefulSetPVCsExistWithOwnerRefs(ctx, c, ss, []int{0, 1, 2}, true, false) framework.ExpectNoError(err) ginkgo.By("Orphaning the 3rd pod") @@ -1304,30 +1304,30 @@ var _ = SIGDescribe("StatefulSet", func() { OwnerReferences: []metav1.OwnerReference{}, }) framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") - _, err = c.CoreV1().Pods(ns).Patch(context.TODO(), fmt.Sprintf("%s-2", ss.Name), types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "") + _, err = c.CoreV1().Pods(ns).Patch(ctx, fmt.Sprintf("%s-2", ss.Name), types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "") framework.ExpectNoError(err, "Could not patch payload") ginkgo.By("Deleting stateful set " + ss.Name) - err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{}) + err = c.AppsV1().StatefulSets(ns).Delete(ctx, ss.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Verifying PVCs deleted") - err = verifyStatefulSetPVCsExist(c, ss, []int{}) + err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{}) framework.ExpectNoError(err) }) ginkgo.It("should delete PVCs after adopting pod (WhenScaled) [Feature:StatefulSetAutoDeletePVC]", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 ss.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ WhenScaled: appsv1.DeletePersistentVolumeClaimRetentionPolicyType, } - _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Confirming all 3 PVCs exist") - err = verifyStatefulSetPVCsExist(c, ss, []int{0, 1, 2}) + err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0, 1, 2}) framework.ExpectNoError(err) ginkgo.By("Orphaning the 
3rd pod") @@ -1335,15 +1335,15 @@ var _ = SIGDescribe("StatefulSet", func() { OwnerReferences: []metav1.OwnerReference{}, }) framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") - _, err = c.CoreV1().Pods(ns).Patch(context.TODO(), fmt.Sprintf("%s-2", ss.Name), types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "") + _, err = c.CoreV1().Pods(ns).Patch(ctx, fmt.Sprintf("%s-2", ss.Name), types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "") framework.ExpectNoError(err, "Could not patch payload") ginkgo.By("Scaling stateful set " + ss.Name + " to one replica") - ss, err = e2estatefulset.Scale(c, ss, 1) + ss, err = e2estatefulset.Scale(ctx, c, ss, 1) framework.ExpectNoError(err) ginkgo.By("Verifying all but one PVC deleted") - err = verifyStatefulSetPVCsExist(c, ss, []int{0}) + err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0}) framework.ExpectNoError(err) }) }) @@ -1362,7 +1362,7 @@ func kubectlExecWithRetries(ns string, args ...string) (out string) { } type statefulPodTester interface { - deploy(ns string) *appsv1.StatefulSet + deploy(ctx context.Context, ns string) *appsv1.StatefulSet write(statefulPodIndex int, kv map[string]string) read(statefulPodIndex int, key string) string name() string @@ -1374,9 +1374,9 @@ type clusterAppTester struct { client clientset.Interface } -func (c *clusterAppTester) run() { +func (c *clusterAppTester) run(ctx context.Context) { ginkgo.By("Deploying " + c.statefulPod.name()) - ss := c.statefulPod.deploy(c.ns) + ss := c.statefulPod.deploy(ctx, c.ns) ginkgo.By("Creating foo:bar in member with index 0") c.statefulPod.write(0, map[string]string{"foo": "bar"}) @@ -1387,13 +1387,13 @@ func (c *clusterAppTester) run() { default: if restartCluster { ginkgo.By("Restarting stateful set " + ss.Name) - e2estatefulset.Restart(c.client, ss) - e2estatefulset.WaitForRunningAndReady(c.client, *ss.Spec.Replicas, ss) + e2estatefulset.Restart(ctx, c.client, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c.client, *ss.Spec.Replicas, ss) } } ginkgo.By("Reading value under foo from member with index 2") - if err := pollReadWithTimeout(c.statefulPod, 2, "foo", "bar"); err != nil { + if err := pollReadWithTimeout(ctx, c.statefulPod, 2, "foo", "bar"); err != nil { framework.Failf("%v", err) } } @@ -1407,8 +1407,8 @@ func (z *zookeeperTester) name() string { return "zookeeper" } -func (z *zookeeperTester) deploy(ns string) *appsv1.StatefulSet { - z.ss = e2estatefulset.CreateStatefulSet(z.client, zookeeperManifestPath, ns) +func (z *zookeeperTester) deploy(ctx context.Context, ns string) *appsv1.StatefulSet { + z.ss = e2estatefulset.CreateStatefulSet(ctx, z.client, zookeeperManifestPath, ns) return z.ss } @@ -1443,8 +1443,8 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string { return kubectlExecWithRetries(ns, "exec", podName, "--", "/bin/sh", "-c", cmd) } -func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet { - m.ss = e2estatefulset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns) +func (m *mysqlGaleraTester) deploy(ctx context.Context, ns string) *appsv1.StatefulSet { + m.ss = e2estatefulset.CreateStatefulSet(ctx, m.client, mysqlGaleraManifestPath, ns) framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name) for _, cmd := range []string{ @@ -1483,8 +1483,8 @@ func (m *redisTester) redisExec(cmd, ns, podName string) string { return e2ekubectl.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd) } -func (m *redisTester) deploy(ns string) 
*appsv1.StatefulSet { - m.ss = e2estatefulset.CreateStatefulSet(m.client, redisManifestPath, ns) +func (m *redisTester) deploy(ctx context.Context, ns string) *appsv1.StatefulSet { + m.ss = e2estatefulset.CreateStatefulSet(ctx, m.client, redisManifestPath, ns) return m.ss } @@ -1514,8 +1514,8 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string { return e2ekubectl.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd) } -func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet { - c.ss = e2estatefulset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns) +func (c *cockroachDBTester) deploy(ctx context.Context, ns string) *appsv1.StatefulSet { + c.ss = e2estatefulset.CreateStatefulSet(ctx, c.client, cockroachDBManifestPath, ns) framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name) for _, cmd := range []string{ "CREATE DATABASE IF NOT EXISTS foo;", @@ -1543,8 +1543,8 @@ func lastLine(out string) string { return outLines[len(outLines)-1] } -func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, key, expectedVal string) error { - err := wait.PollImmediate(time.Second, readTimeout, func() (bool, error) { +func pollReadWithTimeout(ctx context.Context, statefulPod statefulPodTester, statefulPodNumber int, key, expectedVal string) error { + err := wait.PollImmediateWithContext(ctx, time.Second, readTimeout, func(ctx context.Context) (bool, error) { val := statefulPod.read(statefulPodNumber, key) if val == "" { return false, nil @@ -1562,16 +1562,16 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k // This function is used by two tests to test StatefulSet rollbacks: one using // PVCs and one using no storage. -func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { +func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *appsv1.StatefulSet) { setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) - ss = waitForStatus(c, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) + ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", ss.Namespace, ss.Name, updateRevision, currentRevision)) - pods := e2estatefulset.GetPodList(c, ss) + pods := e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, @@ -1582,29 +1582,29 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { e2estatefulset.SortStatefulPods(pods) err = breakPodHTTPProbe(ss, &pods.Items[1]) framework.ExpectNoError(err) - ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name) + ss, _ = waitForPodNotReady(ctx, c, ss, pods.Items[1].Name) newImage := NewWebserverImage oldImage := ss.Spec.Template.Spec.Containers[0].Image ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage)) framework.ExpectNotEqual(oldImage, newImage, "Incorrect test setup: should 
update to a different image") - ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) framework.ExpectNoError(err) ginkgo.By("Creating a new revision") - ss = waitForStatus(c, ss) + ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update") ginkgo.By("Updating Pods in reverse ordinal order") - pods = e2estatefulset.GetPodList(c, ss) + pods = e2estatefulset.GetPodList(ctx, c, ss) e2estatefulset.SortStatefulPods(pods) err = restorePodHTTPProbe(ss, &pods.Items[1]) framework.ExpectNoError(err) - ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name) - ss, pods = waitForRollingUpdate(c, ss) + ss, _ = e2estatefulset.WaitForPodReady(ctx, c, ss, pods.Items[1].Name) + ss, pods = waitForRollingUpdate(ctx, c, ss) framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", ss.Namespace, ss.Name, @@ -1626,23 +1626,23 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { ginkgo.By("Rolling back to a previous revision") err = breakPodHTTPProbe(ss, &pods.Items[1]) framework.ExpectNoError(err) - ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name) + ss, _ = waitForPodNotReady(ctx, c, ss, pods.Items[1].Name) priorRevision := currentRevision - ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) { + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { update.Spec.Template.Spec.Containers[0].Image = oldImage }) framework.ExpectNoError(err) - ss = waitForStatus(c, ss) + ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectEqual(priorRevision, updateRevision, "Prior revision should equal update revision during roll back") framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during roll back") ginkgo.By("Rolling back update in reverse ordinal order") - pods = e2estatefulset.GetPodList(c, ss) + pods = e2estatefulset.GetPodList(ctx, c, ss) e2estatefulset.SortStatefulPods(pods) restorePodHTTPProbe(ss, &pods.Items[1]) - ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name) - ss, pods = waitForRollingUpdate(c, ss) + ss, _ = e2estatefulset.WaitForPodReady(ctx, c, ss, pods.Items[1].Name) + ss, pods = waitForRollingUpdate(ctx, c, ss) framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion", ss.Namespace, ss.Name, @@ -1665,11 +1665,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { // confirmStatefulPodCount asserts that the current number of Pods in ss is count, waiting up to timeout for ss to // to scale to count. 
-func confirmStatefulPodCount(c clientset.Interface, count int, ss *appsv1.StatefulSet, timeout time.Duration, hard bool) { +func confirmStatefulPodCount(ctx context.Context, c clientset.Interface, count int, ss *appsv1.StatefulSet, timeout time.Duration, hard bool) { start := time.Now() deadline := start.Add(timeout) - for t := time.Now(); t.Before(deadline); t = time.Now() { - podList := e2estatefulset.GetPodList(c, ss) + for t := time.Now(); t.Before(deadline) && ctx.Err() == nil; t = time.Now() { + podList := e2estatefulset.GetPodList(ctx, c, ss) statefulPodCount := len(podList.Items) if statefulPodCount != count { e2epod.LogPodStates(podList.Items) @@ -1694,14 +1694,14 @@ func setHTTPProbe(ss *appsv1.StatefulSet) { } // breakHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in ss. -func breakHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error { +func breakHTTPProbe(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) error { path := httpProbe.HTTPGet.Path if path == "" { return fmt.Errorf("path expected to be not empty: %v", path) } // Ignore 'mv' errors to make this idempotent. cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path) - return e2estatefulset.ExecInStatefulPods(c, ss, cmd) + return e2estatefulset.ExecInStatefulPods(ctx, c, ss, cmd) } // breakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod. @@ -1718,14 +1718,14 @@ func breakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error { } // restoreHTTPProbe restores the readiness probe for Nginx StatefulSet containers in ss. -func restoreHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error { +func restoreHTTPProbe(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) error { path := httpProbe.HTTPGet.Path if path == "" { return fmt.Errorf("path expected to be not empty: %v", path) } // Ignore 'mv' errors to make this idempotent. cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path) - return e2estatefulset.ExecInStatefulPods(c, ss, cmd) + return e2estatefulset.ExecInStatefulPods(ctx, c, ss, cmd) } // restorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod. @@ -1742,10 +1742,10 @@ func restorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error { } // deleteStatefulPodAtIndex deletes the Pod with ordinal index in ss. -func deleteStatefulPodAtIndex(c clientset.Interface, index int, ss *appsv1.StatefulSet) { +func deleteStatefulPodAtIndex(ctx context.Context, c clientset.Interface, index int, ss *appsv1.StatefulSet) { name := getStatefulSetPodNameAtIndex(index, ss) noGrace := int64(0) - if err := c.CoreV1().Pods(ss.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { + if err := c.CoreV1().Pods(ss.Namespace).Delete(ctx, name, metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { framework.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err) } } @@ -1760,16 +1760,16 @@ func getStatefulSetPodNameAtIndex(index int, ss *appsv1.StatefulSet) string { type updateStatefulSetFunc func(*appsv1.StatefulSet) // updateStatefulSetWithRetries updates statfulset template with retries. 
-func updateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1.StatefulSet, err error) { +func updateStatefulSetWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1.StatefulSet, err error) { statefulSets := c.AppsV1().StatefulSets(namespace) var updateErr error - pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - if statefulSet, err = statefulSets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { + pollErr := wait.PollWithContext(ctx, 10*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { + if statefulSet, err = statefulSets.Get(ctx, name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(statefulSet) - if statefulSet, err = statefulSets.Update(context.TODO(), statefulSet, metav1.UpdateOptions{}); err == nil { + if statefulSet, err = statefulSets.Update(ctx, statefulSet, metav1.UpdateOptions{}); err == nil { framework.Logf("Updating stateful set %s", name) return true, nil } @@ -1783,8 +1783,8 @@ func updateStatefulSetWithRetries(c clientset.Interface, namespace, name string, } // getStatefulSet gets the StatefulSet named name in namespace. -func getStatefulSet(c clientset.Interface, namespace, name string) *appsv1.StatefulSet { - ss, err := c.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func getStatefulSet(ctx context.Context, c clientset.Interface, namespace, name string) *appsv1.StatefulSet { + ss, err := c.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err) } @@ -1792,13 +1792,13 @@ func getStatefulSet(c clientset.Interface, namespace, name string) *appsv1.State } // verifyStatefulSetPVCsExist confirms that exactly the PVCs for ss with the specified ids exist. This polls until the situation occurs, an error happens, or until timeout (in the latter case an error is also returned). Beware that this cannot tell if a PVC will be deleted at some point in the future, so if used to confirm that no PVCs are deleted, the caller should wait for some event giving the PVCs a reasonable chance to be deleted, before calling this function. -func verifyStatefulSetPVCsExist(c clientset.Interface, ss *appsv1.StatefulSet, claimIds []int) error { +func verifyStatefulSetPVCsExist(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, claimIds []int) error { idSet := map[int]struct{}{} for _, id := range claimIds { idSet[id] = struct{}{} } return wait.PollImmediate(e2estatefulset.StatefulSetPoll, e2estatefulset.StatefulSetTimeout, func() (bool, error) { - pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()}) + pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(ctx, metav1.ListOptions{LabelSelector: klabels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvcs for verification, retrying: %v", err) return false, nil @@ -1832,18 +1832,18 @@ func verifyStatefulSetPVCsExist(c clientset.Interface, ss *appsv1.StatefulSet, c } // verifyStatefulSetPVCsExistWithOwnerRefs works as verifyStatefulSetPVCsExist, but also waits for the ownerRefs to match. 
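// Illustrative sketch (not part of the patch) of the polling conversion used in the
// helpers above: wait.PollImmediate becomes wait.PollImmediateWithContext, so polling
// stops as soon as the Ginkgo ctx is cancelled instead of running to its own timeout.
// The pod name and intervals below are placeholders.
package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

func waitForPodRunning(ctx context.Context, c clientset.Interface, ns, name string) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
		// The per-iteration ctx is the same cancellable context, so the Get call
		// also aborts promptly when the spec is interrupted.
		pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return pod.Status.Phase == v1.PodRunning, nil
	})
}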
-func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, ss *appsv1.StatefulSet, claimIndicies []int, wantSetRef, wantPodRef bool) error { +func verifyStatefulSetPVCsExistWithOwnerRefs(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, claimIndicies []int, wantSetRef, wantPodRef bool) error { indexSet := map[int]struct{}{} for _, id := range claimIndicies { indexSet[id] = struct{}{} } - set := getStatefulSet(c, ss.Namespace, ss.Name) + set := getStatefulSet(ctx, c, ss.Namespace, ss.Name) setUID := set.GetUID() if setUID == "" { framework.Failf("Statefulset %s missing UID", ss.Name) } return wait.PollImmediate(e2estatefulset.StatefulSetPoll, e2estatefulset.StatefulSetTimeout, func() (bool, error) { - pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()}) + pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(ctx, metav1.ListOptions{LabelSelector: klabels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvcs for verification, retrying: %v", err) return false, nil @@ -1872,7 +1872,7 @@ func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, ss *appsv1.S } if ref.Kind == "Pod" { podName := fmt.Sprintf("%s-%d", ss.Name, ordinal) - pod, err := c.CoreV1().Pods(ss.Namespace).Get(context.TODO(), podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ss.Namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { framework.Logf("Pod %s not found, retrying (%v)", podName, err) return false, nil diff --git a/test/e2e/apps/ttl_after_finished.go b/test/e2e/apps/ttl_after_finished.go index 4cae35e27af..96b0acd1795 100644 --- a/test/e2e/apps/ttl_after_finished.go +++ b/test/e2e/apps/ttl_after_finished.go @@ -46,11 +46,11 @@ var _ = SIGDescribe("TTLAfterFinished", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("job should be deleted once it finishes after TTL seconds", func(ctx context.Context) { - testFinishedJob(f) + testFinishedJob(ctx, f) }) }) -func cleanupJob(f *framework.Framework, job *batchv1.Job) { +func cleanupJob(ctx context.Context, f *framework.Framework, job *batchv1.Job) { ns := f.Namespace.Name c := f.ClientSet @@ -58,15 +58,15 @@ func cleanupJob(f *framework.Framework, job *batchv1.Job) { removeFinalizerFunc := func(j *batchv1.Job) { j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil) } - _, err := updateJobWithRetries(c, ns, job.Name, removeFinalizerFunc) + _, err := updateJobWithRetries(ctx, c, ns, job.Name, removeFinalizerFunc) framework.ExpectNoError(err) - e2ejob.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout) + e2ejob.WaitForJobGone(ctx, c, ns, job.Name, wait.ForeverTestTimeout) - err = e2ejob.WaitForAllJobPodsGone(c, ns, job.Name) + err = e2ejob.WaitForAllJobPodsGone(ctx, c, ns, job.Name) framework.ExpectNoError(err) } -func testFinishedJob(f *framework.Framework) { +func testFinishedJob(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -81,19 +81,19 @@ func testFinishedJob(f *framework.Framework) { ginkgo.DeferCleanup(cleanupJob, f, job) framework.Logf("Create a Job %s/%s with TTL", ns, job.Name) - job, err := e2ejob.CreateJob(c, ns, job) + job, err := e2ejob.CreateJob(ctx, c, ns, job) framework.ExpectNoError(err) framework.Logf("Wait for the Job to finish") - err = e2ejob.WaitForJobFinish(c, ns, job.Name) + err = e2ejob.WaitForJobFinish(ctx, c, ns, job.Name) 
framework.ExpectNoError(err) framework.Logf("Wait for TTL after finished controller to delete the Job") - err = waitForJobDeleting(c, ns, job.Name) + err = waitForJobDeleting(ctx, c, ns, job.Name) framework.ExpectNoError(err) framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished") - job, err = e2ejob.GetJob(c, ns, job.Name) + job, err = e2ejob.GetJob(ctx, c, ns, job.Name) framework.ExpectNoError(err) jobFinishTime := finishTime(job) finishTimeUTC := jobFinishTime.UTC() @@ -118,16 +118,16 @@ func finishTime(finishedJob *batchv1.Job) metav1.Time { } // updateJobWithRetries updates job with retries. -func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) { +func updateJobWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) { jobs := c.BatchV1().Jobs(namespace) var updateErr error - pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - if job, err = jobs.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { + pollErr := wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) { + if job, err = jobs.Get(ctx, name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(job) - if job, err = jobs.Update(context.TODO(), job, metav1.UpdateOptions{}); err == nil { + if job, err = jobs.Update(ctx, job, metav1.UpdateOptions{}); err == nil { framework.Logf("Updating job %s", name) return true, nil } @@ -142,9 +142,9 @@ func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUp // waitForJobDeleting uses c to wait for the Job jobName in namespace ns to have // a non-nil deletionTimestamp (i.e. being deleted). -func waitForJobDeleting(c clientset.Interface, ns, jobName string) error { - return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) +func waitForJobDeleting(ctx context.Context, c clientset.Interface, ns, jobName string) error { + return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) { + curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/wait.go b/test/e2e/apps/wait.go index d3e8d0ed480..ad0c84d74d3 100644 --- a/test/e2e/apps/wait.go +++ b/test/e2e/apps/wait.go @@ -17,6 +17,8 @@ limitations under the License. package apps import ( + "context" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -30,7 +32,7 @@ import ( // a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less // than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be // at its update revision. 
-func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) { +func waitForPartitionedRollingUpdate(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) { var pods *v1.PodList if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { framework.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s", @@ -43,7 +45,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful set.Namespace, set.Name) } - e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition) @@ -84,8 +86,8 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful // waitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation. // The returned StatefulSet contains such a StatefulSetStatus -func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet { - e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) { +func waitForStatus(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet { + e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) { if set2.Status.ObservedGeneration >= set.Generation { set = set2 return true, nil @@ -96,9 +98,9 @@ func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.State } // waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition. -func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { +func waitForPodNotReady(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { var pods *v1.PodList - e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 for i := range pods.Items { @@ -113,7 +115,7 @@ func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName // waitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to // complete. set must have a RollingUpdateStatefulSetStrategyType. 
-func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) { +func waitForRollingUpdate(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) { var pods *v1.PodList if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { framework.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s", @@ -121,7 +123,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps set.Name, set.Spec.UpdateStrategy.Type) } - e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 if len(pods.Items) < int(*set.Spec.Replicas) { @@ -150,6 +152,6 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps } // waitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready. -func waitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) { - e2estatefulset.WaitForRunning(c, numStatefulPods, 0, ss) +func waitForRunningAndNotReady(ctx context.Context, c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) { + e2estatefulset.WaitForRunning(ctx, c, numStatefulPods, 0, ss) } diff --git a/test/e2e/architecture/conformance.go b/test/e2e/architecture/conformance.go index ad371755dc8..67bc0bacae2 100644 --- a/test/e2e/architecture/conformance.go +++ b/test/e2e/architecture/conformance.go @@ -38,8 +38,8 @@ var _ = SIGDescribe("Conformance Tests", func() { */ framework.ConformanceIt("should have at least two untainted nodes", func(ctx context.Context) { ginkgo.By("Getting node addresses") - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute)) - nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, 10*time.Minute)) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) if len(nodeList.Items) < 2 { framework.Failf("Conformance requires at least two nodes") diff --git a/test/e2e/auth/certificates.go b/test/e2e/auth/certificates.go index 61af59027c5..8f95f066a5f 100644 --- a/test/e2e/auth/certificates.go +++ b/test/e2e/auth/certificates.go @@ -90,7 +90,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { } // Grant permissions to the new user - clusterRole, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{ + clusterRole, err := f.ClientSet.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{GenerateName: commonName + "-"}, Rules: []rbacv1.PolicyRule{{Verbs: []string{"create"}, APIGroups: []string{"certificates.k8s.io"}, Resources: []string{"certificatesigningrequests"}}}, }, metav1.CreateOptions{}) @@ -99,11 +99,11 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { framework.Logf("error granting permissions to %s, create certificatesigningrequests permissions must be granted out of band: %v", commonName, err) } else { defer func() { - framework.ExpectNoError(f.ClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), clusterRole.Name, metav1.DeleteOptions{})) + framework.ExpectNoError(f.ClientSet.RbacV1().ClusterRoles().Delete(ctx, clusterRole.Name, metav1.DeleteOptions{})) }() } - 
clusterRoleBinding, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ + clusterRoleBinding, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{GenerateName: commonName + "-"}, RoleRef: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: clusterRole.Name}, Subjects: []rbacv1.Subject{{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: commonName}}, @@ -113,15 +113,15 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { framework.Logf("error granting permissions to %s, create certificatesigningrequests permissions must be granted out of band: %v", commonName, err) } else { defer func() { - framework.ExpectNoError(f.ClientSet.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBinding.Name, metav1.DeleteOptions{})) + framework.ExpectNoError(f.ClientSet.RbacV1().ClusterRoleBindings().Delete(ctx, clusterRoleBinding.Name, metav1.DeleteOptions{})) }() } framework.Logf("creating CSR") - csr, err := csrClient.Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) + csr, err := csrClient.Create(ctx, csrTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { - framework.ExpectNoError(csrClient.Delete(context.TODO(), csr.Name, metav1.DeleteOptions{})) + framework.ExpectNoError(csrClient.Delete(ctx, csr.Name, metav1.DeleteOptions{})) }() framework.Logf("approving CSR") @@ -134,9 +134,9 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { Message: "Set from an e2e test", }, } - csr, err = csrClient.UpdateApproval(context.TODO(), csr.Name, csr, metav1.UpdateOptions{}) + csr, err = csrClient.UpdateApproval(ctx, csr.Name, csr, metav1.UpdateOptions{}) if err != nil { - csr, _ = csrClient.Get(context.TODO(), csr.Name, metav1.GetOptions{}) + csr, _ = csrClient.Get(ctx, csr.Name, metav1.GetOptions{}) framework.Logf("err updating approval: %v", err) return false, nil } @@ -145,7 +145,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { framework.Logf("waiting for CSR to be signed") framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) { - csr, err = csrClient.Get(context.TODO(), csr.Name, metav1.GetOptions{}) + csr, err = csrClient.Get(ctx, csr.Name, metav1.GetOptions{}) if err != nil { framework.Logf("error getting csr: %v", err) return false, nil @@ -177,10 +177,10 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err) framework.Logf("creating CSR as new client") - newCSR, err := newClient.CertificateSigningRequests().Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) + newCSR, err := newClient.CertificateSigningRequests().Create(ctx, csrTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { - framework.ExpectNoError(csrClient.Delete(context.TODO(), newCSR.Name, metav1.DeleteOptions{})) + framework.ExpectNoError(csrClient.Delete(ctx, newCSR.Name, metav1.DeleteOptions{})) }() framework.ExpectEqual(newCSR.Spec.Username, commonName) }) @@ -251,7 +251,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { ginkgo.By("getting /apis/certificates.k8s.io") { group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/certificates.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/certificates.k8s.io").Do(ctx).Into(group) 
framework.ExpectNoError(err) found := false for _, version := range group.Versions { @@ -294,38 +294,38 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { // Main resource create/read/update/watch operations ginkgo.By("creating") - _, err = csrClient.Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) + _, err = csrClient.Create(ctx, csrTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = csrClient.Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) + _, err = csrClient.Create(ctx, csrTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) - createdCSR, err := csrClient.Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) + createdCSR, err := csrClient.Create(ctx, csrTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("getting") - gottenCSR, err := csrClient.Get(context.TODO(), createdCSR.Name, metav1.GetOptions{}) + gottenCSR, err := csrClient.Get(ctx, createdCSR.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(gottenCSR.UID, createdCSR.UID) framework.ExpectEqual(gottenCSR.Spec.ExpirationSeconds, csr.DurationToExpirationSeconds(time.Hour)) ginkgo.By("listing") - csrs, err := csrClient.List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) + csrs, err := csrClient.List(ctx, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) framework.ExpectNoError(err) framework.ExpectEqual(len(csrs.Items), 3, "filtered list should have 3 items") ginkgo.By("watching") framework.Logf("starting watch") - csrWatch, err := csrClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: csrs.ResourceVersion, FieldSelector: "metadata.name=" + createdCSR.Name}) + csrWatch, err := csrClient.Watch(ctx, metav1.ListOptions{ResourceVersion: csrs.ResourceVersion, FieldSelector: "metadata.name=" + createdCSR.Name}) framework.ExpectNoError(err) ginkgo.By("patching") - patchedCSR, err := csrClient.Patch(context.TODO(), createdCSR.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) + patchedCSR, err := csrClient.Patch(ctx, createdCSR.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedCSR.Annotations["patched"], "true", "patched object should have the applied annotation") ginkgo.By("updating") csrToUpdate := patchedCSR.DeepCopy() csrToUpdate.Annotations["updated"] = "true" - updatedCSR, err := csrClient.Update(context.TODO(), csrToUpdate, metav1.UpdateOptions{}) + updatedCSR, err := csrClient.Update(ctx, csrToUpdate, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(updatedCSR.Annotations["updated"], "true", "updated object should have the applied annotation") @@ -356,13 +356,13 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { // /approval subresource operations ginkgo.By("getting /approval") - gottenApproval, err := f.DynamicClient.Resource(csrResource).Get(context.TODO(), createdCSR.Name, metav1.GetOptions{}, "approval") + gottenApproval, err := f.DynamicClient.Resource(csrResource).Get(ctx, createdCSR.Name, metav1.GetOptions{}, "approval") framework.ExpectNoError(err) framework.ExpectEqual(gottenApproval.GetObjectKind().GroupVersionKind(), certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest")) framework.ExpectEqual(gottenApproval.GetUID(), createdCSR.UID) ginkgo.By("patching /approval") - 
patchedApproval, err := csrClient.Patch(context.TODO(), createdCSR.Name, types.MergePatchType, + patchedApproval, err := csrClient.Patch(ctx, createdCSR.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patchedapproval":"true"}},"status":{"conditions":[{"type":"ApprovalPatch","status":"True","reason":"e2e"}]}}`), metav1.PatchOptions{}, "approval") framework.ExpectNoError(err) @@ -378,7 +378,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { Reason: "E2E", Message: "Set from an e2e test", }) - updatedApproval, err := csrClient.UpdateApproval(context.TODO(), approvalToUpdate.Name, approvalToUpdate, metav1.UpdateOptions{}) + updatedApproval, err := csrClient.UpdateApproval(ctx, approvalToUpdate.Name, approvalToUpdate, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(len(updatedApproval.Status.Conditions), 2, fmt.Sprintf("updated object should have the applied condition, got %#v", updatedApproval.Status.Conditions)) framework.ExpectEqual(updatedApproval.Status.Conditions[1].Type, certificatesv1.CertificateApproved, fmt.Sprintf("updated object should have the approved condition, got %#v", updatedApproval.Status.Conditions)) @@ -386,13 +386,13 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { // /status subresource operations ginkgo.By("getting /status") - gottenStatus, err := f.DynamicClient.Resource(csrResource).Get(context.TODO(), createdCSR.Name, metav1.GetOptions{}, "status") + gottenStatus, err := f.DynamicClient.Resource(csrResource).Get(ctx, createdCSR.Name, metav1.GetOptions{}, "status") framework.ExpectNoError(err) framework.ExpectEqual(gottenStatus.GetObjectKind().GroupVersionKind(), certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest")) framework.ExpectEqual(gottenStatus.GetUID(), createdCSR.UID) ginkgo.By("patching /status") - patchedStatus, err := csrClient.Patch(context.TODO(), createdCSR.Name, types.MergePatchType, + patchedStatus, err := csrClient.Patch(ctx, createdCSR.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"certificate":`+string(certificateDataJSON)+`}}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) @@ -407,7 +407,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { Reason: "E2E", Message: "Set from an e2e test", }) - updatedStatus, err := csrClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err := csrClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(len(updatedStatus.Status.Conditions), len(statusToUpdate.Status.Conditions), fmt.Sprintf("updated object should have the applied condition, got %#v", updatedStatus.Status.Conditions)) framework.ExpectEqual(string(updatedStatus.Status.Conditions[len(updatedStatus.Status.Conditions)-1].Type), "StatusUpdate", fmt.Sprintf("updated object should have the approved condition, got %#v", updatedStatus.Status.Conditions)) @@ -415,20 +415,20 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { // main resource delete operations ginkgo.By("deleting") - err = csrClient.Delete(context.TODO(), createdCSR.Name, metav1.DeleteOptions{}) + err = csrClient.Delete(ctx, createdCSR.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - _, err = csrClient.Get(context.TODO(), createdCSR.Name, metav1.GetOptions{}) + _, err = csrClient.Get(ctx, createdCSR.Name, metav1.GetOptions{}) if 
!apierrors.IsNotFound(err) { framework.Failf("expected 404, got %#v", err) } - csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) + csrs, err = csrClient.List(ctx, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) framework.ExpectNoError(err) framework.ExpectEqual(len(csrs.Items), 2, "filtered list should have 2 items") ginkgo.By("deleting a collection") - err = csrClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) + err = csrClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) framework.ExpectNoError(err) - csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) + csrs, err = csrClient.List(ctx, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) framework.ExpectNoError(err) framework.ExpectEqual(len(csrs.Items), 0, "filtered list should have 0 items") }) diff --git a/test/e2e/auth/node_authn.go b/test/e2e/auth/node_authn.go index d5708dd32a5..8ac2fc3664d 100644 --- a/test/e2e/auth/node_authn.go +++ b/test/e2e/auth/node_authn.go @@ -41,10 +41,10 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var ns string var nodeIPs []string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns = f.Namespace.Name - nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, 1) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, 1) framework.ExpectNoError(err) family := v1.IPv4Protocol @@ -57,7 +57,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { }) ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func(ctx context.Context) { - pod := createNodeAuthTestPod(f) + pod := createNodeAuthTestPod(ctx, f) for _, nodeIP := range nodeIPs { // Anonymous authentication is disabled by default host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort)) @@ -76,10 +76,10 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { }, AutomountServiceAccountToken: &trueValue, } - _, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(context.TODO(), newSA, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(ctx, newSA, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create service account (%s:%s)", ns, newSA.Name) - pod := createNodeAuthTestPod(f) + pod := createNodeAuthTestPod(ctx, f) for _, nodeIP := range nodeIPs { host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort)) @@ -94,8 +94,8 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { }) }) -func createNodeAuthTestPod(f *framework.Framework) *v1.Pod { +func createNodeAuthTestPod(ctx context.Context, f *framework.Framework) *v1.Pod { pod := e2epod.NewAgnhostPod(f.Namespace.Name, "agnhost-pod", nil, nil, nil) pod.ObjectMeta.GenerateName = "test-node-authn-" - return e2epod.NewPodClient(f).CreateSync(pod) + return e2epod.NewPodClient(f).CreateSync(ctx, pod) } diff --git a/test/e2e/auth/node_authz.go b/test/e2e/auth/node_authz.go index 24586d48689..570d59a092e 100644 --- a/test/e2e/auth/node_authz.go +++ b/test/e2e/auth/node_authz.go @@ -49,10 +49,10 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { var ns string var asUser string var nodeName string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx 
context.Context) { ns = f.Namespace.Name - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns) framework.ExpectNotEqual(len(nodeList.Items), 0) nodeName = nodeList.Items[0].Name @@ -69,7 +69,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }) ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) { - _, err := c.CoreV1().Secrets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) + _, err := c.CoreV1().Secrets(ns).Get(ctx, "foo", metav1.GetOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("should be a forbidden error, got %#v", err) } @@ -84,16 +84,16 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }, StringData: map[string]string{}, } - _, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create secret (%s:%s) %+v", ns, secret.Name, *secret) - _, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{}) + _, err = c.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("should be a forbidden error, got %#v", err) } }) ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) { - _, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) + _, err := c.CoreV1().ConfigMaps(ns).Get(ctx, "foo", metav1.GetOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("should be a forbidden error, got %#v", err) } @@ -110,9 +110,9 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { "data": "content", }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(context.TODO(), configmap, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(ctx, configmap, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap) - _, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), configmap.Name, metav1.GetOptions{}) + _, err = c.CoreV1().ConfigMaps(ns).Get(ctx, configmap.Name, metav1.GetOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("should be a forbidden error, got %#v", err) } @@ -129,11 +129,11 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { "data": []byte("keep it secret"), }, } - _, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create secret (%s:%s)", ns, secret.Name) ginkgo.By("Node should not get the secret") - _, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{}) + _, err = c.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("should be a forbidden error, got %#v", err) } @@ -164,14 +164,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }, } - _, err = f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to 
create pod (%s:%s)", ns, pod.Name) ginkgo.By("The node should able to access the secret") itv := framework.Poll dur := 1 * time.Minute err = wait.Poll(itv, dur, func() (bool, error) { - _, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{}) + _, err = c.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Failed to get secret %v, err: %v", secret.Name, err) return false, nil @@ -190,7 +190,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }, } ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) - _, err := c.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + _, err := c.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) // NOTE: If the test fails and a new node IS created, we need to delete it. If we don't, we'd have // a zombie node in a NotReady state which will delay further tests since we're waiting for all @@ -204,7 +204,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { ginkgo.It("A node shouldn't be able to delete another node", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) - err := c.CoreV1().Nodes().Delete(context.TODO(), "foo", metav1.DeleteOptions{}) + err := c.CoreV1().Nodes().Delete(ctx, "foo", metav1.DeleteOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("should be a forbidden error, got %#v", err) } diff --git a/test/e2e/auth/selfsubjectreviews.go b/test/e2e/auth/selfsubjectreviews.go index 9419345e97a..29ec28c54e6 100644 --- a/test/e2e/auth/selfsubjectreviews.go +++ b/test/e2e/auth/selfsubjectreviews.go @@ -69,7 +69,7 @@ var _ = SIGDescribe("SelfSubjectReview [Feature:APISelfSubjectReview]", func() { ginkgo.By("getting /apis/authentication.k8s.io") { group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/authentication.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/authentication.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err) found := false for _, version := range group.Versions { @@ -112,7 +112,7 @@ var _ = SIGDescribe("SelfSubjectReview [Feature:APISelfSubjectReview]", func() { } ssrClient := kubernetes.NewForConfigOrDie(config).AuthenticationV1alpha1().SelfSubjectReviews() - res, err := ssrClient.Create(context.TODO(), &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) + res, err := ssrClient.Create(ctx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(config.Impersonate.UserName, res.Status.UserInfo.Username) diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 5dc8f6327ec..111ab22118a 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -60,7 +60,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { { ginkgo.By("ensuring no secret-based service account token exists") time.Sleep(10 * time.Second) - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(ctx, "default", metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEmpty(sa.Secrets) } @@ -76,11 +76,11 @@ var _ = SIGDescribe("ServiceAccounts", func() { Account mount path MUST be auto mounted to the Container. 
*/ framework.ConformanceIt("should mount an API token into pods ", func(ctx context.Context) { - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{}) framework.ExpectNoError(err) zero := int64(0) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{ + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-service-account-" + string(uuid.NewUUID()), }, @@ -96,7 +96,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { }, }, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)) tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name) mountedToken, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey)) @@ -107,14 +107,14 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ExpectNoError(err) // CA and namespace should be identical - rootCA, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) + rootCA, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, rootCAConfigMapName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name) framework.ExpectEqual(mountedCA, rootCA.Data["ca.crt"]) framework.ExpectEqual(mountedNamespace, f.Namespace.Name) // Token should be a valid credential that identifies the pod's service account tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}} - tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(context.TODO(), tokenReview, metav1.CreateOptions{}) + tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(ctx, tokenReview, metav1.CreateOptions{}) framework.ExpectNoError(err) if !tokenReview.Status.Authenticated { framework.Fail("tokenReview is not authenticated") @@ -165,9 +165,9 @@ var _ = SIGDescribe("ServiceAccounts", func() { falseValue := false mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue} nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue} - mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), mountSA, metav1.CreateOptions{}) + mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, mountSA, metav1.CreateOptions{}) framework.ExpectNoError(err) - nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), nomountSA, metav1.CreateOptions{}) + nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, nomountSA, metav1.CreateOptions{}) framework.ExpectNoError(err) testcases := []struct { @@ -246,7 +246,7 @@ var _ = SIGDescribe("ServiceAccounts", 
func() { AutomountServiceAccountToken: tc.AutomountPodSpec, }, } - createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("created pod %s", tc.PodName) @@ -317,7 +317,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, `[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*`), } - e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output) + e2eoutput.TestContainerOutputRegexp(ctx, f, "service account token: ", pod, 0, output) }) /* @@ -425,7 +425,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { fmt.Sprintf("owner UID of \"%v\": %d", tokenVolumePath, tc.wantUID), fmt.Sprintf("owner GID of \"%v\": %d", tokenVolumePath, tc.wantGID), } - e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output) + e2eoutput.TestContainerOutputRegexp(ctx, f, "service account token: ", pod, 0, output) } }) @@ -489,11 +489,11 @@ var _ = SIGDescribe("ServiceAccounts", func() { }}, }, } - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("created pod") - if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { + if !e2epod.CheckPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name) } @@ -502,7 +502,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { var logs string if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) { framework.Logf("polling logs") - logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient") + logs, err = e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient") if err != nil { framework.Logf("Error pulling logs: %v", err) return false, nil @@ -538,7 +538,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { const clusterRoleName = "system:service-account-issuer-discovery" crbName := fmt.Sprintf("%s-%s", f.Namespace.Name, clusterRoleName) if crb, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create( - context.TODO(), + ctx, &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: crbName, @@ -564,7 +564,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { defer func() { framework.ExpectNoError( f.ClientSet.RbacV1().ClusterRoleBindings().Delete( - context.TODO(), + ctx, crb.Name, metav1.DeleteOptions{})) }() } @@ -612,17 +612,17 @@ var _ = SIGDescribe("ServiceAccounts", func() { }}, }, } - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("created pod") - podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + podErr := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) // Get the logs before calling ExpectNoError, so we can debug any errors. 
var logs string if err := wait.Poll(30*time.Second, 2*time.Minute, func() (done bool, err error) { framework.Logf("polling logs") - logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + logs, err = e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) if err != nil { framework.Logf("Error pulling logs: %v", err) return false, nil @@ -659,16 +659,16 @@ var _ = SIGDescribe("ServiceAccounts", func() { Labels: testServiceAccountStaticLabels, }, } - createdServiceAccount, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Create(context.TODO(), &testServiceAccount, metav1.CreateOptions{}) + createdServiceAccount, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Create(ctx, &testServiceAccount, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create a ServiceAccount") - getServiceAccount, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Get(context.TODO(), testServiceAccountName, metav1.GetOptions{}) + getServiceAccount, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Get(ctx, testServiceAccountName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch the created ServiceAccount") framework.ExpectEqual(createdServiceAccount.UID, getServiceAccount.UID) ginkgo.By("watching for the ServiceAccount to be added") resourceWatchTimeoutSeconds := int64(180) - resourceWatch, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Watch(context.TODO(), metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat, TimeoutSeconds: &resourceWatchTimeoutSeconds}) + resourceWatch, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Watch(ctx, metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat, TimeoutSeconds: &resourceWatchTimeoutSeconds}) if err != nil { fmt.Println(err, "failed to setup watch on newly created ServiceAccount") return @@ -691,7 +691,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { AutomountServiceAccountToken: &boolFalse, }) framework.ExpectNoError(err, "failed to marshal JSON patch for the ServiceAccount") - _, err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Patch(context.TODO(), testServiceAccountName, types.StrategicMergePatchType, []byte(testServiceAccountPatchData), metav1.PatchOptions{}) + _, err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Patch(ctx, testServiceAccountName, types.StrategicMergePatchType, []byte(testServiceAccountPatchData), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch the ServiceAccount") eventFound = false for watchEvent := range resourceWatchChan { @@ -704,7 +704,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.Failf("failed to find %v event", watch.Modified) } ginkgo.By("finding ServiceAccount in list of all ServiceAccounts (by LabelSelector)") - serviceAccountList, err := f.ClientSet.CoreV1().ServiceAccounts("").List(context.TODO(), metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat}) + serviceAccountList, err := f.ClientSet.CoreV1().ServiceAccounts("").List(ctx, metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat}) framework.ExpectNoError(err, "failed to list ServiceAccounts by LabelSelector") foundServiceAccount := false for _, serviceAccountItem := range serviceAccountList.Items { @@ -717,7 +717,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.Fail("failed to find the created ServiceAccount") } ginkgo.By("deleting the ServiceAccount") - 
err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}) framework.ExpectNoError(err, "failed to delete the ServiceAccount by Collection") eventFound = false for watchEvent := range resourceWatchChan { @@ -741,7 +741,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { */ framework.ConformanceIt("should guarantee kube-root-ca.crt exist in any namespace", func(ctx context.Context) { framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, rootCAConfigMapName, metav1.GetOptions{}) if err == nil { return true, nil } @@ -753,12 +753,12 @@ var _ = SIGDescribe("ServiceAccounts", func() { })) framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name) - framework.ExpectNoError(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), rootCAConfigMapName, metav1.DeleteOptions{GracePeriodSeconds: utilptr.Int64Ptr(0)})) + framework.ExpectNoError(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, rootCAConfigMapName, metav1.DeleteOptions{GracePeriodSeconds: utilptr.Int64Ptr(0)})) framework.Logf("Deleted root ca configmap in namespace %q", f.Namespace.Name) framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { ginkgo.By("waiting for a new root ca configmap created") - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, rootCAConfigMapName, metav1.GetOptions{}) if err == nil { return true, nil } @@ -770,7 +770,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { })) framework.Logf("Recreated root ca configmap in namespace %q", f.Namespace.Name) - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), &v1.ConfigMap{ + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: rootCAConfigMapName, }, @@ -783,7 +783,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { ginkgo.By("waiting for the root ca configmap reconciled") - cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) + cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, rootCAConfigMapName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { ginkgo.By("root ca configmap not found, retrying") @@ -819,7 +819,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { } ginkgo.By(fmt.Sprintf("Creating ServiceAccount %q ", saName)) - createdServiceAccount, err := saClient.Create(context.TODO(), initialServiceAccount, metav1.CreateOptions{}) + createdServiceAccount, err := saClient.Create(ctx, initialServiceAccount, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(createdServiceAccount.AutomountServiceAccountToken, utilptr.Bool(false), "Failed to set AutomountServiceAccountToken") framework.Logf("AutomountServiceAccountToken: %v", 
*createdServiceAccount.AutomountServiceAccountToken) @@ -828,10 +828,10 @@ var _ = SIGDescribe("ServiceAccounts", func() { var updatedServiceAccount *v1.ServiceAccount err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - updateServiceAccount, err := saClient.Get(context.TODO(), saName, metav1.GetOptions{}) + updateServiceAccount, err := saClient.Get(ctx, saName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get ServiceAccount %q", saName) updateServiceAccount.AutomountServiceAccountToken = utilptr.Bool(true) - updatedServiceAccount, err = saClient.Update(context.TODO(), updateServiceAccount, metav1.UpdateOptions{}) + updatedServiceAccount, err = saClient.Update(ctx, updateServiceAccount, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "Failed to update ServiceAccount") diff --git a/test/e2e/autoscaling/autoscaling_timer.go b/test/e2e/autoscaling/autoscaling_timer.go index fd0119b2a2e..c07536b0968 100644 --- a/test/e2e/autoscaling/autoscaling_timer.go +++ b/test/e2e/autoscaling/autoscaling_timer.go @@ -39,9 +39,9 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling" var experiment *gmeasure.Experiment ginkgo.Describe("Autoscaling a service", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap. - _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(ctx, "cluster-autoscaler-status", metav1.GetOptions{}) if err != nil { e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled") } @@ -54,7 +54,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling" var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test. var nodes *v1.NodeList // Set by BeforeEach, used by Measure to calculate CPU request based on node's sizes. - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // Make sure there is only 1 node group, otherwise this test becomes useless. nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") if len(nodeGroups) != 1 { @@ -70,19 +70,19 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling" } // Make sure all nodes are schedulable, otherwise we are in some kind of a problem state. - nodes, err = e2enode.GetReadySchedulableNodes(f.ClientSet) + nodes, err = e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) schedulableCount := len(nodes.Items) framework.ExpectEqual(schedulableCount, nodeGroupSize, "not all nodes are schedulable") }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { // Attempt cleanup only if a node group was targeted for scale up. // Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters. if len(nodeGroupName) > 0 { // Scale down back to only 'nodesNum' nodes, as expected at the start of the test. 
framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum)) - framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, nodesNum, 15*time.Minute)) } }) @@ -102,21 +102,21 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling" nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024 memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory. replicas := 1 - resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) + resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer(ctx, "resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) ginkgo.DeferCleanup(resourceConsumer.CleanUp) - resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough. + resourceConsumer.WaitForReplicas(ctx, replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough. // Enable Horizontal Pod Autoscaler with 50% target utilization and // scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied. targetCPUUtilizationPercent := int32(50) - hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10) + hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(ctx, resourceConsumer, targetCPUUtilizationPercent, 1, 10) ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, resourceConsumer, hpa.Name) cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level resourceConsumer.ConsumeCPU(int(cpuLoad)) // Measure the time it takes for the service to scale to 8 pods with 50% CPU utilization each. experiment.SampleDuration("total scale-up time", func(idx int) { - resourceConsumer.WaitForReplicas(8, timeToWait) + resourceConsumer.WaitForReplicas(ctx, 8, timeToWait) }, gmeasure.SamplingConfig{N: 1}) }) // Increase to run the test more than once. }) diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index 36aae1f3bc0..3c30a52b2d1 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -70,11 +70,11 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { var originalSizes map[string]int var sum int - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke", "kubemark") // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap. 
- _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(ctx, "cluster-autoscaler-status", metav1.GetOptions{}) if err != nil { e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled") } @@ -92,9 +92,9 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { } } - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, sum, scaleUpTimeout)) - nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) nodeCount = len(nodes.Items) cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU] @@ -114,17 +114,17 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster")) setMigSizes(originalSizes) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount, scaleDownTimeout)) - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount, scaleDownTimeout)) + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) s := time.Now() makeSchedulableLoop: for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) { for _, n := range nodes.Items { - err = makeNodeSchedulable(c, &n, true) + err = makeNodeSchedulable(ctx, c, &n, true) switch err.(type) { case CriticalAddonsOnlyError: continue makeSchedulableLoop @@ -146,9 +146,9 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { additionalReservation := additionalNodes * perNodeReservation // saturate cluster - reservationCleanup := ReserveMemory(f, "some-pod", nodeCount*2, nodeCount*perNodeReservation, true, memoryReservationTimeout) + reservationCleanup := ReserveMemory(ctx, f, "some-pod", nodeCount*2, nodeCount*perNodeReservation, true, memoryReservationTimeout) defer reservationCleanup() - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) // configure pending pods & expected scale up rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas, additionalReservation, largeScaleUpTimeout) @@ -156,7 +156,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { config := createScaleUpTestConfig(nodeCount, nodeCount, rcConfig, expectedResult) // run test - testCleanup := simpleScaleUpTest(f, config) + testCleanup := simpleScaleUpTest(ctx, f, config) defer testCleanup() }) @@ -176,9 +176,9 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { // saturate cluster initialReplicas := nodeCount - reservationCleanup := ReserveMemory(f, "some-pod", initialReplicas, nodeCount*perNodeReservation, true, memoryReservationTimeout) + reservationCleanup := ReserveMemory(ctx, f, "some-pod", initialReplicas, nodeCount*perNodeReservation, true, memoryReservationTimeout) defer reservationCleanup() - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) klog.Infof("Reserved successfully") @@ -190,7 +190,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { 
// run test #1 tolerateUnreadyNodes := additionalNodes1 / 20 tolerateUnreadyPods := (initialReplicas + replicas1) / 20 - testCleanup1 := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods) + testCleanup1 := simpleScaleUpTestWithTolerance(ctx, f, config, tolerateUnreadyNodes, tolerateUnreadyPods) defer testCleanup1() klog.Infof("Scaled up once") @@ -203,7 +203,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { // run test #2 tolerateUnreadyNodes = maxNodes / 20 tolerateUnreadyPods = (initialReplicas + replicas1 + replicas2) / 20 - testCleanup2 := simpleScaleUpTestWithTolerance(f, config2, tolerateUnreadyNodes, tolerateUnreadyPods) + testCleanup2 := simpleScaleUpTestWithTolerance(ctx, f, config2, tolerateUnreadyNodes, tolerateUnreadyPods) defer testCleanup2() klog.Infof("Scaled up twice") @@ -219,7 +219,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { anyKey(originalSizes): totalNodes, } setMigSizes(newSizes) - framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, totalNodes, largeResizeTimeout)) // run replicas rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout) @@ -227,11 +227,11 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { config := createScaleUpTestConfig(totalNodes, totalNodes, rcConfig, expectedResult) tolerateUnreadyNodes := totalNodes / 10 tolerateUnreadyPods := replicas / 10 - testCleanup := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods) + testCleanup := simpleScaleUpTestWithTolerance(ctx, f, config, tolerateUnreadyNodes, tolerateUnreadyPods) defer testCleanup() // check if empty nodes are scaled down - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size <= replicas+3 // leaving space for non-evictable kube-system pods }, scaleDownTimeout)) @@ -253,19 +253,19 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { } setMigSizes(newSizes) - framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, totalNodes, largeResizeTimeout)) // annotate all nodes with no-scale-down ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled" - nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{ FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String(), }) framework.ExpectNoError(err) - framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "true")) + framework.ExpectNoError(addAnnotation(ctx, f, nodes.Items, ScaleDownDisabledKey, "true")) // distribute pods using replication controllers taking up space that should // be empty after pods are distributed @@ -276,11 +276,11 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { {numNodes: fullNodesNum, podsPerNode: fullPerNodeReplicas}, {numNodes: underutilizedNodesNum, podsPerNode: underutilizedPerNodeReplicas}} - distributeLoad(f, f.Namespace.Name, "10-70", podDistribution, perPodReservation, + distributeLoad(ctx, f, f.Namespace.Name, "10-70", podDistribution, perPodReservation, 
int(0.95*float64(memCapacityMb)), map[string]string{}, largeScaleUpTimeout) // enable scale down again - framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "false")) + framework.ExpectNoError(addAnnotation(ctx, f, nodes.Items, ScaleDownDisabledKey, "false")) // wait for scale down to start. Node deletion takes a long time, so we just // wait for maximum of 30 nodes deleted @@ -290,7 +290,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { } expectedSize := totalNodes - nodesToScaleDownCount timeout := time.Duration(nodesToScaleDownCount)*time.Minute + scaleDownTimeout - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size <= expectedSize }, timeout)) }) @@ -306,41 +306,41 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { anyKey(originalSizes): totalNodes, } setMigSizes(newSizes) - framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, totalNodes, largeResizeTimeout)) divider := int(float64(totalNodes) * 0.7) fullNodesCount := divider underutilizedNodesCount := totalNodes - fullNodesCount ginkgo.By("Reserving full nodes") // run RC1 w/o host port - cleanup := ReserveMemory(f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2) + cleanup := ReserveMemory(ctx, f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2) defer cleanup() ginkgo.By("Reserving host ports on remaining nodes") // run RC2 w/ host port ginkgo.DeferCleanup(createHostPortPodsWithMemory, f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout) - waitForAllCaPodsReadyInNamespace(f, c) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) // wait and check scale down doesn't occur ginkgo.By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes())) time.Sleep(scaleDownTimeout) ginkgo.By("Checking if the number of nodes is as expected") - nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes) framework.ExpectEqual(len(nodes.Items), totalNodes) }) - ginkgo.Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() { + ginkgo.It("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func(ctx context.Context) { // Start a number of pods saturating existing nodes. 
perNodeReservation := int(float64(memCapacityMb) * 0.80) replicasPerNode := 10 initialPodReplicas := nodeCount * replicasPerNode initialPodsTotalMemory := nodeCount * perNodeReservation - reservationCleanup := ReserveMemory(f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout) + reservationCleanup := ReserveMemory(ctx, f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout) ginkgo.DeferCleanup(reservationCleanup) - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) // Configure a number of unschedulable pods. unschedulableMemReservation := memCapacityMb * 2 @@ -348,11 +348,11 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas timeToWait := 5 * time.Minute podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait) - _ = e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable) + _ = e2erc.RunRC(ctx, *podsConfig) // Ignore error (it will occur because pods are unschedulable) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, podsConfig.Name) // Ensure that no new nodes have been added so far. - readyNodeCount, _ := e2enode.TotalReady(f.ClientSet) + readyNodeCount, _ := e2enode.TotalReady(ctx, f.ClientSet) framework.ExpectEqual(readyNodeCount, nodeCount) // Start a number of schedulable pods to ensure CA reacts. @@ -364,7 +364,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { config := createScaleUpTestConfig(nodeCount, initialPodReplicas, rcConfig, expectedResult) // Test that scale up happens, allowing 1000 unschedulable pods not to be scheduled. - testCleanup := simpleScaleUpTestWithTolerance(f, config, 0, unschedulablePodReplicas) + testCleanup := simpleScaleUpTestWithTolerance(ctx, f, config, 0, unschedulablePodReplicas) ginkgo.DeferCleanup(testCleanup) }) @@ -377,35 +377,35 @@ func anyKey(input map[string]int) string { return "" } -func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error { +func simpleScaleUpTestWithTolerance(ctx context.Context, f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error { // resize cluster to start size // run rc based on config ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name)) start := time.Now() - framework.ExpectNoError(e2erc.RunRC(*config.extraPods)) + framework.ExpectNoError(e2erc.RunRC(ctx, *config.extraPods)) // check results if tolerateMissingNodeCount > 0 { // Tolerate some number of nodes not to be created. 
minExpectedNodeCount := config.expectedResult.nodes - tolerateMissingNodeCount - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout)) } else { - framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, config.expectedResult.nodes, scaleUpTimeout)) } klog.Infof("cluster is increased") if tolerateMissingPodCount > 0 { - framework.ExpectNoError(waitForCaPodsReadyInNamespace(f, f.ClientSet, tolerateMissingPodCount)) + framework.ExpectNoError(waitForCaPodsReadyInNamespace(ctx, f, f.ClientSet, tolerateMissingPodCount)) } else { - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, f.ClientSet)) } timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes)) return func() error { - return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name) + return e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, config.extraPods.Name) } } -func simpleScaleUpTest(f *framework.Framework, config *scaleUpTestConfig) func() error { - return simpleScaleUpTestWithTolerance(f, config, 0, 0) +func simpleScaleUpTest(ctx context.Context, f *framework.Framework, config *scaleUpTestConfig) func() error { + return simpleScaleUpTestWithTolerance(ctx, f, config, 0, 0) } func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabytes int, timeout time.Duration) *testutils.RCConfig { @@ -435,7 +435,7 @@ func createClusterPredicates(nodes int) *clusterPredicates { } } -func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) error { +func addAnnotation(ctx context.Context, f *framework.Framework, nodes []v1.Node, key, value string) error { for _, node := range nodes { oldData, err := json.Marshal(node) if err != nil { @@ -457,7 +457,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e return err } - _, err = f.ClientSet.CoreV1().Nodes().Patch(context.TODO(), string(node.Name), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = f.ClientSet.CoreV1().Nodes().Patch(ctx, string(node.Name), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { return err } @@ -465,7 +465,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e return nil } -func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error { +func createHostPortPodsWithMemory(ctx context.Context, f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error { ginkgo.By(fmt.Sprintf("Running RC which reserves host port and memory")) request := int64(1024 * 1024 * megabytes / replicas) config := &testutils.RCConfig{ @@ -478,10 +478,10 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p HostPorts: map[string]int{"port1": port}, MemRequest: request, } - err := e2erc.RunRC(*config) + err := e2erc.RunRC(ctx, *config) framework.ExpectNoError(err) return func() error { - return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id) + return e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, id) } } @@ -501,7 +501,7 @@ type podBatch struct { // conflicting 
host port // 2. Create target RC that will generate the load on the cluster // 3. Remove the rcs created in 1. -func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch, +func distributeLoad(ctx context.Context, f *framework.Framework, namespace string, id string, podDistribution []podBatch, podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) { port := 8013 // Create load-distribution RCs with one pod per node, reserving all remaining @@ -512,14 +512,14 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist totalPods += podBatch.numNodes * podBatch.podsPerNode remainingMem := nodeMemCapacity - podBatch.podsPerNode*podMemRequestMegabytes replicas := podBatch.numNodes - cleanup := createHostPortPodsWithMemory(f, fmt.Sprintf("load-distribution%d", i), replicas, port, remainingMem*replicas, timeout) + cleanup := createHostPortPodsWithMemory(ctx, f, fmt.Sprintf("load-distribution%d", i), replicas, port, remainingMem*replicas, timeout) defer cleanup() } - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, f.ClientSet)) // Create the target RC rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout) - framework.ExpectNoError(e2erc.RunRC(*rcConfig)) - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) + framework.ExpectNoError(e2erc.RunRC(ctx, *rcConfig)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, f.ClientSet)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, id) } diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 752b051ba18..e83fc612001 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -100,7 +100,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { var memAllocatableMb int var originalSizes map[string]int - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { c = f.ClientSet e2eskipper.SkipUnlessProviderIs("gce", "gke") @@ -114,9 +114,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { sum += size } // Give instances time to spin up - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, sum, scaleUpTimeout)) - nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) nodeCount = len(nodes.Items) ginkgo.By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount)) @@ -136,7 +136,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster")) setMigSizes(originalSizes) @@ -144,15 +144,15 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { for _, size := range originalSizes { expectedNodes += size } - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout)) - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, expectedNodes, scaleDownTimeout)) + nodes, err := c.CoreV1().Nodes().List(ctx, 
metav1.ListOptions{}) framework.ExpectNoError(err) s := time.Now() makeSchedulableLoop: for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) { for _, n := range nodes.Items { - err = makeNodeSchedulable(c, &n, true) + err = makeNodeSchedulable(ctx, c, &n, true) switch err.(type) { case CriticalAddonsOnlyError: continue makeSchedulableLoop @@ -167,7 +167,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.By("Creating unschedulable pod") - ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout) + ReserveMemory(ctx, f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.By("Waiting for scale up hoping it won't happen") @@ -176,7 +176,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { EventsLoop: for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) { ginkgo.By("Waiting for NotTriggerScaleUp event") - events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, e := range events.Items { @@ -191,22 +191,23 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.Failf("Expected event with kind 'Pod' and reason 'NotTriggerScaleUp' not found.") } // Verify that cluster size is not changed - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size <= nodeCount }, time.Second)) }) - simpleScaleUpTest := func(unready int) { - ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) + simpleScaleUpTest := func(ctx context.Context, unready int) { + ReserveMemory(ctx, f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout, unready)) - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) } - ginkgo.It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", - func() { simpleScaleUpTest(0) }) + ginkgo.It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + simpleScaleUpTest(ctx, 0) + }) gpuType := os.Getenv("TESTED_GPU_TYPE") @@ -221,20 +222,20 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addGpuNodePool(gpuPoolName, gpuType, 1, 0) defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet(f) + installNvidiaDriversDaemonSet(ctx, f) ginkgo.By("Enable autoscaler") framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) defer disableAutoscaler(gpuPoolName, 0, 1) - framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0) + 
framework.ExpectEqual(len(getPoolNodes(ctx, f, gpuPoolName)), 0) ginkgo.By("Schedule a pod which requires GPU") - framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) + framework.ExpectNoError(ScheduleAnySingleGpuPod(ctx, f, "gpu-pod-rc")) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc") - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout)) - framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1) + framework.ExpectEqual(len(getPoolNodes(ctx, f, gpuPoolName)), 1) }) ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { @@ -248,23 +249,23 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addGpuNodePool(gpuPoolName, gpuType, 1, 1) defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet(f) + installNvidiaDriversDaemonSet(ctx, f) ginkgo.By("Schedule a single pod which requires GPU") - framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) + framework.ExpectNoError(ScheduleAnySingleGpuPod(ctx, f, "gpu-pod-rc")) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc") ginkgo.By("Enable autoscaler") framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2)) defer disableAutoscaler(gpuPoolName, 0, 2) - framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1) + framework.ExpectEqual(len(getPoolNodes(ctx, f, gpuPoolName)), 1) ginkgo.By("Scale GPU deployment") - e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true) + e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true) - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout)) - framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 2) + framework.ExpectEqual(len(getPoolNodes(ctx, f, gpuPoolName)), 2) }) ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { @@ -278,22 +279,22 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addGpuNodePool(gpuPoolName, gpuType, 1, 0) defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet(f) + installNvidiaDriversDaemonSet(ctx, f) ginkgo.By("Enable autoscaler") framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) defer disableAutoscaler(gpuPoolName, 0, 1) - framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0) + framework.ExpectEqual(len(getPoolNodes(ctx, f, gpuPoolName)), 0) ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs") - ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) + ReserveMemory(ctx, f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) // Expect gpu pool to stay intact - framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 
0) + framework.ExpectEqual(len(getPoolNodes(ctx, f, gpuPoolName)), 0) }) ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { @@ -307,33 +308,32 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addGpuNodePool(gpuPoolName, gpuType, 1, 1) defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet(f) + installNvidiaDriversDaemonSet(ctx, f) ginkgo.By("Schedule a single pod which requires GPU") - framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) + framework.ExpectNoError(ScheduleAnySingleGpuPod(ctx, f, "gpu-pod-rc")) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc") ginkgo.By("Enable autoscaler") framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) defer disableAutoscaler(gpuPoolName, 0, 1) - framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1) + framework.ExpectEqual(len(getPoolNodes(ctx, f, gpuPoolName)), 1) ginkgo.By("Remove the only POD requiring GPU") - e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc") + e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, "gpu-pod-rc") - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size == nodeCount }, scaleDownTimeout)) - framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0) + framework.ExpectEqual(len(getPoolNodes(ctx, f, gpuPoolName)), 0) }) - ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]", - func() { - e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) }) - }) + ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + e2enetwork.TestUnderTemporaryNetworkFailure(ctx, c, "default", getAnyNode(ctx, c), func(ctx context.Context) { simpleScaleUpTest(ctx, 1) }) + }) ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { // Wait for the situation to stabilize - CA should be running and have up-to-date node readiness info. 
- status, err := waitForScaleUpStatus(c, func(s *scaleUpStatus) bool { + status, err := waitForScaleUpStatus(ctx, c, func(s *scaleUpStatus) bool { return s.ready == s.target && s.ready <= nodeCount }, scaleUpTriggerTimeout) framework.ExpectNoError(err) @@ -341,21 +341,21 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { unmanagedNodes := nodeCount - status.ready ginkgo.By("Schedule more pods than can fit and wait for cluster to scale-up") - ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) + ReserveMemory(ctx, f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") - status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool { + status, err = waitForScaleUpStatus(ctx, c, func(s *scaleUpStatus) bool { return s.status == caOngoingScaleUpStatus }, scaleUpTriggerTimeout) framework.ExpectNoError(err) target := status.target - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) ginkgo.By("Expect no more scale-up to be happening after all pods are scheduled") // wait for a while until scale-up finishes; we cannot read CA status immediately // after pods are scheduled as status config map is updated by CA once every loop iteration - status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool { + status, err = waitForScaleUpStatus(ctx, c, func(s *scaleUpStatus) bool { return s.status == caNoScaleUpStatus }, 2*freshStatusLimit) framework.ExpectNoError(err) @@ -366,7 +366,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectEqual(status.timestamp.Add(freshStatusLimit).Before(time.Now()), false) framework.ExpectEqual(status.status, caNoScaleUpStatus) framework.ExpectEqual(status.ready, status.target) - nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) framework.ExpectEqual(len(nodes.Items), status.target+unmanagedNodes) }) @@ -379,14 +379,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addNodePool(extraPoolName, "n1-standard-4", 1) defer deleteNodePool(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout)) // We wait for nodes to become schedulable to make sure the new nodes // will be returned by getPoolNodes below. 
- framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, resizeTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, resizeTimeout)) klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).") ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC") - nodes := getPoolNodes(f, extraPoolName) + nodes := getPoolNodes(ctx, f, extraPoolName) framework.ExpectEqual(len(nodes), extraNodes) extraMemMb := 0 for _, node := range nodes { @@ -397,12 +397,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.By("Reserving 0.1x more memory than the cluster holds to trigger scale up") totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") - ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout) + ReserveMemory(ctx, f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout) // Verify, that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size >= nodeCount+extraNodes+1 }, scaleUpTimeout)) - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) }) ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { @@ -413,18 +413,18 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addNodePool(extraPoolName, "n1-standard-4", 1) defer deleteNodePool(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout)) framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2)) framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2)) }) ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { - scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false) + scheduling.CreateHostPortPods(ctx, f, "host-port", nodeCount+2, false) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "host-port") - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout)) - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) }) ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { @@ -434,16 +434,16 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { "anti-affinity": "yes", } ginkgo.By("starting a pod with anti-affinity on each node") - framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) + framework.ExpectNoError(runAntiAffinityPods(ctx, f, f.Namespace.Name, pods, "some-pod", labels, labels)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "some-pod") - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + 
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) ginkgo.By("scheduling extra pods with anti-affinity to existing ones") - framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels)) + framework.ExpectNoError(runAntiAffinityPods(ctx, f, f.Namespace.Name, newPods, "extra-pod", labels, labels)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "extra-pod") - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+newPods, scaleUpTimeout)) }) ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { @@ -453,18 +453,18 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { labels := map[string]string{ "anti-affinity": "yes", } - framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) + framework.ExpectNoError(runAntiAffinityPods(ctx, f, f.Namespace.Name, pods, "some-pod", labels, labels)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "some-pod") ginkgo.By("waiting for all pods before triggering scale up") - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) ginkgo.By("creating a pod requesting EmptyDir") - framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes)) + framework.ExpectNoError(runVolumeAntiAffinityPods(ctx, f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "extra-pod") - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+newPods, scaleUpTimeout)) }) ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { @@ -476,7 +476,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { selector := metav1.SetAsLabelSelector(volumeLabels) ginkgo.By("creating volume & pvc") - diskName, err := e2epv.CreatePDWithRetry() + diskName, err := e2epv.CreatePDWithRetry(ctx) framework.ExpectNoError(err) pvConfig := e2epv.PersistentVolumeConfig{ NamePrefix: "gce-", @@ -496,18 +496,18 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { StorageClassName: &emptyStorageClass, } - pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, f.Namespace.Name, false) + pv, pvc, err := e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, f.Namespace.Name, false) framework.ExpectNoError(err) - framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, f.Namespace.Name, pv, pvc)) + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, f.Namespace.Name, pv, pvc)) defer func() { - errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc) + errs := e2epv.PVPVCCleanup(ctx, c, f.Namespace.Name, pv, pvc) if len(errs) > 0 { framework.Failf("failed to delete PVC and/or PV. 
Errors: %v", utilerrors.NewAggregate(errs)) } pv, pvc = nil, nil if diskName != "" { - framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName)) + framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, diskName)) } }() @@ -516,25 +516,25 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { labels := map[string]string{ "anti-affinity": "yes", } - framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) + framework.ExpectNoError(runAntiAffinityPods(ctx, f, f.Namespace.Name, pods, "some-pod", labels, labels)) ginkgo.DeferCleanup(func(ctx context.Context) { - e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod") + e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, "some-pod") klog.Infof("RC and pods not using volume deleted") }) ginkgo.By("waiting for all pods before triggering scale up") - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) ginkgo.By("creating a pod requesting PVC") pvcPodName := "pvc-pod" newPods := 1 volumes := buildVolumes(pv, pvc) - framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes)) + framework.ExpectNoError(runVolumeAntiAffinityPods(ctx, f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, pvcPodName) ginkgo.DeferCleanup(waitForAllCaPodsReadyInNamespace, f, c) - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+newPods, scaleUpTimeout)) }) ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { @@ -577,12 +577,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { e2enode.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue) } - err = scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false) + err = scheduling.CreateNodeSelectorPods(ctx, f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false) framework.ExpectNoError(err) ginkgo.By("Waiting for new node to appear and annotating it") framework.WaitForGroupSize(minMig, int32(minSize+1)) // Verify that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) newNodes, err := framework.GetGroupNodes(minMig) @@ -619,7 +619,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.By(fmt.Sprintf("New nodes: %v\n", newNodesSet)) registeredNodes := sets.NewString() for nodeName := range newNodesSet { - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err == nil && node != nil { registeredNodes.Insert(nodeName) } else { @@ -633,8 +633,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { defer removeLabels(registeredNodes) - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) - framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector")) + 
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) + framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, "node-selector")) }) ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { @@ -645,7 +645,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { addNodePool(extraPoolName, "n1-standard-4", 1) defer deleteNodePool(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout)) framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2)) defer disableAutoscaler(extraPoolName, 1, 2) @@ -653,17 +653,17 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb)) ginkgo.By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") - ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout) + ReserveMemory(ctx, f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout) // Apparently GKE master is restarted couple minutes after the node pool is added // resetting all the timers in scale down code. Adding 5 extra minutes to workaround // this issue. // TODO: Remove the extra time when GKE restart is fixed. - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute)) }) - simpleScaleDownTest := func(unready int) { - err := addKubeSystemPdbs(f) + simpleScaleDownTest := func(ctx context.Context, unready int) { + err := addKubeSystemPdbs(ctx, f) framework.ExpectNoError(err) ginkgo.By("Manually increase cluster size") @@ -674,34 +674,33 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { increasedSize += val + 2 + unready } setMigSizes(newSizes) - framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready)) ginkgo.By("Some node should be removed") - framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, func(size int) bool { return size < increasedSize }, scaleDownTimeout, unready)) } ginkgo.It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]", - func() { simpleScaleDownTest(0) }) + func(ctx context.Context) { simpleScaleDownTest(ctx, 0) }) - ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]", - func() { - e2eskipper.SkipUnlessSSHKeyPresent() - e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) }) - }) + ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + e2eskipper.SkipUnlessSSHKeyPresent() + e2enetwork.TestUnderTemporaryNetworkFailure(ctx, c, "default", 
getAnyNode(ctx, c), func(ctx context.Context) { simpleScaleDownTest(ctx, 1) }) + }) ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") - increasedSize := manuallyIncreaseClusterSize(f, originalSizes) + increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes) const extraPoolName = "extra-pool" addNodePool(extraPoolName, "n1-standard-1", 3) defer deleteNodePool(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName) - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size >= increasedSize+extraNodes }, scaleUpTimeout)) ginkgo.By("Some node should be removed") @@ -709,40 +708,40 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { // resetting all the timers in scale down code. Adding 10 extra minutes to workaround // this issue. // TODO: Remove the extra time when GKE restart is fixed. - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute)) }) ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { - runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) { + runDrainTest(ctx, f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size < increasedSize }, scaleDownTimeout)) }) }) ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { - runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) { + runDrainTest(ctx, f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) { ginkgo.By("No nodes should be removed") time.Sleep(scaleDownTimeout) - nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) framework.ExpectEqual(len(nodes.Items), increasedSize) }) }) ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { - runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) { + runDrainTest(ctx, f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size < increasedSize }, scaleDownTimeout)) }) }) ginkgo.It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { - runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) { + runDrainTest(ctx, f, originalSizes, "kube-system", 2, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + 
framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size < increasedSize }, scaleDownTimeout)) }) }) @@ -771,17 +770,17 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } } framework.ExpectNoError(framework.ResizeGroup(minMig, int32(0))) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount-minSize, resizeTimeout)) } ginkgo.By("Make remaining nodes unschedulable") - nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) for _, node := range nodes.Items { - err = makeNodeUnschedulable(f.ClientSet, &node) + err = makeNodeUnschedulable(ctx, f.ClientSet, &node) n := node ginkgo.DeferCleanup(makeNodeSchedulable, f.ClientSet, &n, false) @@ -790,13 +789,13 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } ginkgo.By("Run a scale-up test") - ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second) + ReserveMemory(ctx, f, "memory-reservation", 1, 100, false, 1*time.Second) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size >= len(nodes.Items)+1 }, scaleUpTimeout)) - framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) + framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) }) // Scale to 0 test is split into two functions (for GKE & GCE.) 
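The hunks on either side of this point all thread the ctx received by the It/BeforeEach/AfterEach body into the helpers they call, so that polls such as the WaitForClusterSizeFunc calls and the Nodes().List/Get requests stop as soon as the spec's context is cancelled. As a rough sketch of that shape only: the helper below is hypothetical (its name, signature, and body are illustrative and not taken from this patch), and it assumes nothing beyond client-go's standard typed client.

package autoscalingsketch

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForClusterSize polls the node count until sizeFunc is satisfied, the
// timeout expires, or ctx is cancelled (for example when the spec is aborted).
func waitForClusterSize(ctx context.Context, c kubernetes.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	ticker := time.NewTicker(20 * time.Second)
	defer ticker.Stop()

	for {
		// Handing ctx to the API call lets an in-flight List return promptly
		// once the context is cancelled, instead of running to completion.
		nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err == nil && sizeFunc(len(nodes.Items)) {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("waiting for cluster size: %w", ctx.Err())
		case <-ticker.C:
		}
	}
}

Because cancellation reaches both the select and the client-go request, the same idea carries over to the ReserveMemory, drainNode, and getPoolSize conversions above, which likewise take the caller's ctx instead of creating their own context.TODO().
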
@@ -809,35 +808,35 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { // manually drain the single node from this node pool/MIG // wait for cluster size to decrease // verify the targeted node pool/MIG is of size 0 - gkeScaleToZero := func() { + gkeScaleToZero := func(ctx context.Context) { // GKE-specific setup ginkgo.By("Add a new node pool with size 1 and min size 0") const extraPoolName = "extra-pool" addNodePool(extraPoolName, "n1-standard-4", 1) defer deleteNodePool(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout)) framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1)) defer disableAutoscaler(extraPoolName, 0, 1) - ngNodes := getPoolNodes(f, extraPoolName) + ngNodes := getPoolNodes(ctx, f, extraPoolName) framework.ExpectEqual(len(ngNodes), extraNodes) for _, node := range ngNodes { ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name)) } for _, node := range ngNodes { - drainNode(f, node) + drainNode(ctx, f, node) } - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size <= nodeCount }, scaleDownTimeout)) // GKE-specific check - newSize := getPoolSize(f, extraPoolName) + newSize := getPoolSize(ctx, f, extraPoolName) framework.ExpectEqual(newSize, 0) } - gceScaleToZero := func() { + gceScaleToZero := func(ctx context.Context) { // non-GKE only ginkgo.By("Find smallest node group and manually scale it to a single node") minMig := "" @@ -849,19 +848,19 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } } framework.ExpectNoError(framework.ResizeGroup(minMig, int32(1))) - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount-minSize+1, resizeTimeout)) ngNodes, err := framework.GetGroupNodes(minMig) framework.ExpectNoError(err) if len(ngNodes) != 1 { framework.Failf("Expected one node, got instead: %v", ngNodes) } - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), ngNodes[0], metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, ngNodes[0], metav1.GetOptions{}) ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name)) framework.ExpectNoError(err) // this part is identical - drainNode(f, node) - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + drainNode(ctx, f, node) + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size < nodeCount-minSize+1 }, scaleDownTimeout)) // non-GKE only @@ -872,9 +871,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { if framework.ProviderIs("gke") { // In GKE, we can just add a node pool - gkeScaleToZero() + gkeScaleToZero(ctx) } else if len(originalSizes) >= 2 { - gceScaleToZero() + gceScaleToZero(ctx) } else { e2eskipper.Skipf("At least 2 node groups are needed for scale-to-0 tests") } @@ -885,7 +884,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { clusterSize := nodeCount for clusterSize < unhealthyClusterThreshold+1 { - clusterSize = manuallyIncreaseClusterSize(f, originalSizes) + clusterSize = manuallyIncreaseClusterSize(ctx, f, 
originalSizes) } // If new nodes are disconnected too soon, they'll be considered not started @@ -906,7 +905,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.By("Block network connectivity to some nodes to simulate unhealthy cluster") nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize)))) - nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) @@ -918,100 +917,100 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { // TestUnderTemporaryNetworkFailure only removes connectivity to a single node, // and accepts func() callback. This is expanding the loop to recursive call // to avoid duplicating TestUnderTemporaryNetworkFailure - var testFunction func() - testFunction = func() { + var testFunction func(ctx context.Context) + testFunction = func(ctx context.Context) { if len(nodesToBreak) > 0 { ntb := &nodesToBreak[0] nodesToBreak = nodesToBreak[1:] - e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction) + e2enetwork.TestUnderTemporaryNetworkFailure(ctx, c, "default", ntb, testFunction) } else { - ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout) + ReserveMemory(ctx, f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") // Wait for 15m to ensure Cluster Autoscaler won't consider broken nodes as still starting. time.Sleep(15 * time.Minute) - currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + currentNodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount) framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount) - status, err := getClusterwideStatus(c) + status, err := getClusterwideStatus(ctx, c) framework.Logf("Clusterwide status: %v", status) framework.ExpectNoError(err) framework.ExpectEqual(status, "Unhealthy") } } - testFunction() + testFunction(ctx) // Give nodes time to recover from network failure - framework.ExpectNoError(e2enode.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout)) + framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, len(nodes.Items), nodesRecoverTimeout)) }) ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { - createPriorityClasses(f) + createPriorityClasses(ctx, f) // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created. 
ginkgo.DeferCleanup(ReserveMemoryWithPriority, f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName) ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String())) time.Sleep(scaleUpTimeout) // Verify that cluster size is not changed - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size == nodeCount }, time.Second)) }) ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { - createPriorityClasses(f) + createPriorityClasses(ctx, f) // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created. - cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName) + cleanupFunc := ReserveMemoryWithPriority(ctx, f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName) defer cleanupFunc() // Verify that cluster size is not changed - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size > nodeCount }, time.Second)) }) ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { - createPriorityClasses(f) + createPriorityClasses(ctx, f) // Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node. - cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName) + cleanupFunc1 := ReserveMemoryWithPriority(ctx, f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName) defer cleanupFunc1() // Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node. Pods created here should preempt pods created above. - cleanupFunc2 := ReserveMemoryWithPriority(f, "memory-reservation2", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, highPriorityClassName) + cleanupFunc2 := ReserveMemoryWithPriority(ctx, f, "memory-reservation2", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, highPriorityClassName) defer cleanupFunc2() - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size == nodeCount }, time.Second)) }) ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { - createPriorityClasses(f) - increasedSize := manuallyIncreaseClusterSize(f, originalSizes) + createPriorityClasses(ctx, f) + increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes) // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node. 
- cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName) + cleanupFunc := ReserveMemoryWithPriority(ctx, f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName) defer cleanupFunc() ginkgo.By("Waiting for scale down") - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size == nodeCount }, scaleDownTimeout)) }) ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { - createPriorityClasses(f) - increasedSize := manuallyIncreaseClusterSize(f, originalSizes) + createPriorityClasses(ctx, f) + increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes) // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node. - cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName) + cleanupFunc := ReserveMemoryWithPriority(ctx, f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName) defer cleanupFunc() ginkgo.By(fmt.Sprintf("Waiting for scale down hoping it won't happen, sleep for %s", scaleDownTimeout.String())) time.Sleep(scaleDownTimeout) - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool { return size == increasedSize }, time.Second)) }) }) -func installNvidiaDriversDaemonSet(f *framework.Framework) { +func installNvidiaDriversDaemonSet(ctx context.Context, f *framework.Framework) { ginkgo.By("Add daemonset which installs nvidia drivers") dsYamlURL := "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml" framework.Logf("Using %v", dsYamlURL) // Creates the DaemonSet that installs Nvidia Drivers. - ds, err := e2emanifest.DaemonSetFromURL(dsYamlURL) + ds, err := e2emanifest.DaemonSetFromURL(ctx, dsYamlURL) framework.ExpectNoError(err) ds.Namespace = f.Namespace.Name - _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") } @@ -1020,17 +1019,17 @@ func execCmd(args ...string) *exec.Cmd { return exec.Command(args[0], args[1:]...) 
} -func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) { - increasedSize := manuallyIncreaseClusterSize(f, migSizes) +func runDrainTest(ctx context.Context, f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) { + increasedSize := manuallyIncreaseClusterSize(ctx, f, migSizes) - nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) numPods := len(nodes.Items) * podsPerNode testID := string(uuid.NewUUID()) // So that we can label and find pods labelMap := map[string]string{"test_id": testID} - framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0)) + framework.ExpectNoError(runReplicatedPodOnEachNode(ctx, f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0)) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, namespace, "reschedulable-pods") @@ -1046,7 +1045,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str MinAvailable: &minAvailable, }, } - _, err = f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) + _, err = f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Create(ctx, pdb, metav1.CreateOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Delete), pdb.Name, metav1.DeleteOptions{}) @@ -1243,9 +1242,9 @@ func deleteNodePool(name string) { framework.ExpectNoError(err) } -func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node { +func getPoolNodes(ctx context.Context, f *framework.Framework, poolName string) []*v1.Node { nodes := make([]*v1.Node, 0, 1) - nodeList, err := e2enode.GetReadyNodesIncludingTainted(f.ClientSet) + nodeList, err := e2enode.GetReadyNodesIncludingTainted(ctx, f.ClientSet) if err != nil { framework.Logf("Unexpected error occurred: %v", err) } @@ -1285,9 +1284,9 @@ func getPoolInitialSize(poolName string) int { return int(size) * nodeGroupCount } -func getPoolSize(f *framework.Framework, poolName string) int { +func getPoolSize(ctx context.Context, f *framework.Framework, poolName string) int { size := 0 - nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) for _, node := range nodeList.Items { if node.Labels[gkeNodepoolNameKey] == poolName { @@ -1297,7 +1296,7 @@ func getPoolSize(f *framework.Framework, poolName string) int { return size } -func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error { +func reserveMemory(ctx context.Context, f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error { ginkgo.By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes)) request := int64(1024 * 1024 * megabytes / replicas) config := &testutils.RCConfig{ @@ -1313,7 +1312,7 @@ func reserveMemory(f 
*framework.Framework, id string, replicas, megabytes int, e PriorityClassName: priorityClassName, } for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) { - err := e2erc.RunRC(*config) + err := e2erc.RunRC(ctx, *config) if err != nil && strings.Contains(err.Error(), "Error creating replication controller") { klog.Warningf("Failed to create memory reservation: %v", err) continue @@ -1322,7 +1321,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e framework.ExpectNoError(err) } return func() error { - return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id) + return e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, id) } } framework.Failf("Failed to reserve memory within timeout") @@ -1331,31 +1330,31 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e // ReserveMemoryWithPriority creates a replication controller with pods with priority that, in summation, // request the specified amount of memory. -func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error { - return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName) +func ReserveMemoryWithPriority(ctx context.Context, f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error { + return reserveMemory(ctx, f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName) } // ReserveMemoryWithSelectorAndTolerations creates a replication controller with pods with node selector that, in summation, // request the specified amount of memory. -func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error { - return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "") +func ReserveMemoryWithSelectorAndTolerations(ctx context.Context, f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error { + return reserveMemory(ctx, f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "") } // ReserveMemory creates a replication controller with pods that, in summation, // request the specified amount of memory. -func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error { - return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "") +func ReserveMemory(ctx context.Context, f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error { + return reserveMemory(ctx, f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "") } // WaitForClusterSizeFunc waits until the cluster size matches the given function. 
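reserveMemory now returns a cleanup closure that captures ctx, and callers either defer it or register it with ginkgo.DeferCleanup. The sketch below shows that pattern with a hypothetical reserveSomething helper: the deferred cleanup runs before the spec body returns, so the captured ctx is still usable, and if the spec was aborted the already-cancelled ctx makes the teardown return promptly instead of hanging.

package main

import (
	"context"
	"fmt"
	"time"
)

// reserveSomething mimics the shape of reserveMemory: do the setup with the
// caller's ctx and hand back a cleanup func that reuses that same ctx.
func reserveSomething(ctx context.Context, id string) (func() error, error) {
	fmt.Printf("reserving %q\n", id)
	cleanup := func() error {
		// If the test was aborted, ctx is already cancelled and the real
		// teardown call would return quickly instead of blocking.
		if err := ctx.Err(); err != nil {
			return err
		}
		fmt.Printf("releasing %q\n", id)
		return nil
	}
	return cleanup, nil
}

func main() {
	// Stand-in for the ctx a Ginkgo It body receives.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	cleanup, err := reserveSomething(ctx, "memory-reservation")
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	defer func() {
		if err := cleanup(); err != nil {
			fmt.Println("cleanup skipped:", err)
		}
	}()

	fmt.Println("test body runs here")
}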
-func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error { - return WaitForClusterSizeFuncWithUnready(c, sizeFunc, timeout, 0) +func WaitForClusterSizeFunc(ctx context.Context, c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error { + return WaitForClusterSizeFuncWithUnready(ctx, c, sizeFunc, timeout, 0) } // WaitForClusterSizeFuncWithUnready waits until the cluster size matches the given function and assumes some unready nodes. -func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ +func WaitForClusterSizeFuncWithUnready(ctx context.Context, c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error { + for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(20 * time.Second) { + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -1379,10 +1378,10 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout) } -func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error { +func waitForCaPodsReadyInNamespace(ctx context.Context, f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error { var notready []string - for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) { - pods, err := c.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)) && ctx.Err() == nil; time.Sleep(20 * time.Second) { + pods, err := c.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return fmt.Errorf("failed to get pods: %v", err) } @@ -1417,12 +1416,12 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface return fmt.Errorf("Too many pods are still not running: %v", notready) } -func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error { - return waitForCaPodsReadyInNamespace(f, c, 0) +func waitForAllCaPodsReadyInNamespace(ctx context.Context, f *framework.Framework, c clientset.Interface) error { + return waitForCaPodsReadyInNamespace(ctx, f, c, 0) } -func getAnyNode(c clientset.Interface) *v1.Node { - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ +func getAnyNode(ctx context.Context, c clientset.Interface) *v1.Node { + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -1451,24 +1450,24 @@ func setMigSizes(sizes map[string]int) bool { return madeChanges } -func drainNode(f *framework.Framework, node *v1.Node) { +func drainNode(ctx context.Context, f *framework.Framework, node *v1.Node) { ginkgo.By("Make the single node unschedulable") - makeNodeUnschedulable(f.ClientSet, node) + framework.ExpectNoError(makeNodeUnschedulable(ctx, f.ClientSet, node)) ginkgo.By("Manually drain the single node") podOpts := metav1.ListOptions{FieldSelector: 
fields.OneTermEqualSelector("spec.nodeName", node.Name).String()} - pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts) + pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(ctx, podOpts) framework.ExpectNoError(err) for _, pod := range pods.Items { - err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) } } -func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { +func makeNodeUnschedulable(ctx context.Context, c clientset.Interface, node *v1.Node) error { ginkgo.By(fmt.Sprintf("Taint node %s", node.Name)) for j := 0; j < 3; j++ { - freshNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + freshNode, err := c.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { return err } @@ -1482,7 +1481,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { Value: "DisabledForTest", Effect: v1.TaintEffectNoSchedule, }) - _, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode, metav1.UpdateOptions{}) + _, err = c.CoreV1().Nodes().Update(ctx, freshNode, metav1.UpdateOptions{}) if err == nil { return nil } @@ -1502,10 +1501,10 @@ func (CriticalAddonsOnlyError) Error() string { return fmt.Sprintf("CriticalAddonsOnly taint found on node") } -func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error { +func makeNodeSchedulable(ctx context.Context, c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error { ginkgo.By(fmt.Sprintf("Remove taint from node %s", node.Name)) for j := 0; j < 3; j++ { - freshNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + freshNode, err := c.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { return err } @@ -1523,7 +1522,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd return nil } freshNode.Spec.Taints = newTaints - _, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode, metav1.UpdateOptions{}) + _, err = c.CoreV1().Nodes().Update(ctx, freshNode, metav1.UpdateOptions{}) if err == nil { return nil } @@ -1536,12 +1535,12 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd } // ScheduleAnySingleGpuPod schedules a pod which requires single GPU of any type -func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error { - return ScheduleGpuPod(f, id, "", 1) +func ScheduleAnySingleGpuPod(ctx context.Context, f *framework.Framework, id string) error { + return ScheduleGpuPod(ctx, f, id, "", 1) } // ScheduleGpuPod schedules a pod which requires a given number of gpus of given type -func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error { +func ScheduleGpuPod(ctx context.Context, f *framework.Framework, id string, gpuType string, gpuLimit int64) error { config := &testutils.RCConfig{ Client: f.ClientSet, Name: id, @@ -1557,7 +1556,7 @@ func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit config.NodeSelector = map[string]string{gpuLabel: gpuType} } - err := e2erc.RunRC(*config) + err := e2erc.RunRC(ctx, *config) if err != nil { return err } @@ -1565,7 +1564,7 @@ func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit } // Create an RC running a given number of pods with 
anti-affinity -func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error { +func runAntiAffinityPods(ctx context.Context, f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error { config := &testutils.RCConfig{ Affinity: buildAntiAffinity(antiAffinityLabels), Client: f.ClientSet, @@ -1576,18 +1575,18 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id Replicas: pods, Labels: podLabels, } - err := e2erc.RunRC(*config) + err := e2erc.RunRC(ctx, *config) if err != nil { return err } - _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(ctx, id, metav1.GetOptions{}) if err != nil { return err } return nil } -func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string, volumes []v1.Volume) error { +func runVolumeAntiAffinityPods(ctx context.Context, f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string, volumes []v1.Volume) error { config := &testutils.RCConfig{ Affinity: buildAntiAffinity(antiAffinityLabels), Volumes: volumes, @@ -1599,11 +1598,11 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in Replicas: pods, Labels: podLabels, } - err := e2erc.RunRC(*config) + err := e2erc.RunRC(ctx, *config) if err != nil { return err } - _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(ctx, id, metav1.GetOptions{}) if err != nil { return err } @@ -1657,10 +1656,10 @@ func buildAntiAffinity(labels map[string]string) *v1.Affinity { // 3. for each node: // 3a. enable scheduling on that node // 3b. 
increase number of replicas in RC by podsPerNode -func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) error { +func runReplicatedPodOnEachNode(ctx context.Context, f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) error { ginkgo.By("Run a pod on each node") for _, node := range nodes { - err := makeNodeUnschedulable(f.ClientSet, &node) + err := makeNodeUnschedulable(ctx, f.ClientSet, &node) n := node ginkgo.DeferCleanup(makeNodeSchedulable, f.ClientSet, &n, false) @@ -1679,16 +1678,16 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa Labels: labels, MemRequest: memRequest, } - err := e2erc.RunRC(*config) + err := e2erc.RunRC(ctx, *config) if err != nil { return err } - rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(ctx, id, metav1.GetOptions{}) if err != nil { return err } for i, node := range nodes { - err = makeNodeSchedulable(f.ClientSet, &node, false) + err = makeNodeSchedulable(ctx, f.ClientSet, &node, false) if err != nil { return err } @@ -1697,7 +1696,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa // (we retry 409 errors in case rc reference got out of sync) for j := 0; j < 3; j++ { *rc.Spec.Replicas = int32((i + 1) * podsPerNode) - rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(ctx, rc, metav1.UpdateOptions{}) if err == nil { break } @@ -1705,14 +1704,14 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa return err } klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j) - rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(ctx, id, metav1.GetOptions{}) if err != nil { return err } } err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) { - rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(ctx, id, metav1.GetOptions{}) if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) { return false, nil } @@ -1721,7 +1720,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa if err != nil { return fmt.Errorf("failed to coerce RC into spawning a pod on node %s within timeout", node.Name) } - err = makeNodeUnschedulable(f.ClientSet, &node) + err = makeNodeUnschedulable(ctx, f.ClientSet, &node) if err != nil { return err } @@ -1731,7 +1730,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa // Increase cluster size by newNodesForScaledownTests to create some unused nodes // that can be later removed by cluster autoscaler. 
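Several DeferCleanup calls in this file register functions such as makeNodeSchedulable and DeleteRCAndWaitForGC without supplying a context argument, relying on Ginkgo v2 to inject one when the cleanup runs because the function's first parameter is a context. A minimal self-contained spec illustrating that pattern follows; the package name and the cordon/uncordon helpers are hypothetical, and the DeferCleanup behaviour is Ginkgo v2's documented contract, not something introduced by this patch.

package autoscaling_sketch_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestSketch(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "ctx plumbing sketch")
}

// cordon/uncordon are hypothetical stand-ins for makeNodeUnschedulable and
// makeNodeSchedulable; they only demonstrate the signatures involved.
func cordon(ctx context.Context, name string) error {
	fmt.Println("cordoning", name)
	return ctx.Err()
}

func uncordon(ctx context.Context, name string) error {
	fmt.Println("uncordoning", name)
	return ctx.Err()
}

var _ = ginkgo.Describe("cordon cleanup", func() {
	ginkgo.It("uncordons the node even when the spec is interrupted", func(ctx context.Context) {
		gomega.Expect(cordon(ctx, "node-1")).To(gomega.Succeed())
		// No explicit ctx argument here: because uncordon's first parameter
		// is a context, DeferCleanup supplies one when the cleanup runs,
		// which is what the DeferCleanup(makeNodeSchedulable, ...) calls in
		// the patch depend on.
		ginkgo.DeferCleanup(uncordon, "node-1")
	})
})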
-func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[string]int) int { +func manuallyIncreaseClusterSize(ctx context.Context, f *framework.Framework, originalSizes map[string]int) int { ginkgo.By("Manually increase cluster size") increasedSize := 0 newSizes := make(map[string]int) @@ -1752,14 +1751,14 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin return false } - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, manualResizeTimeout)) + framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, checkClusterSize, manualResizeTimeout)) return increasedSize } // Try to get clusterwide health from CA status configmap. // Status configmap is not parsing-friendly, so evil regexpery follows. -func getClusterwideStatus(c clientset.Interface) (string, error) { - configMap, err := c.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) +func getClusterwideStatus(ctx context.Context, c clientset.Interface) (string, error) { + configMap, err := c.CoreV1().ConfigMaps("kube-system").Get(ctx, "cluster-autoscaler-status", metav1.GetOptions{}) if err != nil { return "", err } @@ -1807,8 +1806,8 @@ func getStatusTimestamp(status string) (time.Time, error) { // Try to get scaleup statuses of all node groups. // Status configmap is not parsing-friendly, so evil regexpery follows. -func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) { - configMap, err := c.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) +func getScaleUpStatus(ctx context.Context, c clientset.Interface) (*scaleUpStatus, error) { + configMap, err := c.CoreV1().ConfigMaps("kube-system").Get(ctx, "cluster-autoscaler-status", metav1.GetOptions{}) if err != nil { return nil, err } @@ -1856,11 +1855,11 @@ func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) { return &result, nil } -func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) { +func waitForScaleUpStatus(ctx context.Context, c clientset.Interface, cond func(s *scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) { var finalErr error var status *scaleUpStatus - err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) { - status, finalErr = getScaleUpStatus(c) + err := wait.PollImmediateWithContext(ctx, 5*time.Second, timeout, func(ctx context.Context) (bool, error) { + status, finalErr = getScaleUpStatus(ctx, c) if finalErr != nil { return false, nil } @@ -1879,15 +1878,15 @@ func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) boo // This is a temporary fix to allow CA to migrate some kube-system pods // TODO: Remove this when the PDB is added for some of those components -func addKubeSystemPdbs(f *framework.Framework) error { +func addKubeSystemPdbs(ctx context.Context, f *framework.Framework) error { ginkgo.By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required") var newPdbs []string - cleanup := func() { + cleanup := func(ctx context.Context) { var finalErr error for _, newPdbName := range newPdbs { ginkgo.By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName)) - err := f.ClientSet.PolicyV1().PodDisruptionBudgets("kube-system").Delete(context.TODO(), newPdbName, metav1.DeleteOptions{}) + err := f.ClientSet.PolicyV1().PodDisruptionBudgets("kube-system").Delete(ctx, newPdbName, 
metav1.DeleteOptions{}) if err != nil { // log error, but attempt to remove other pdbs klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err) @@ -1926,7 +1925,7 @@ func addKubeSystemPdbs(f *framework.Framework) error { MinAvailable: &minAvailable, }, } - _, err := f.ClientSet.PolicyV1().PodDisruptionBudgets("kube-system").Create(context.TODO(), pdb, metav1.CreateOptions{}) + _, err := f.ClientSet.PolicyV1().PodDisruptionBudgets("kube-system").Create(ctx, pdb, metav1.CreateOptions{}) newPdbs = append(newPdbs, pdbName) if err != nil { @@ -1936,13 +1935,13 @@ func addKubeSystemPdbs(f *framework.Framework) error { return nil } -func createPriorityClasses(f *framework.Framework) { +func createPriorityClasses(ctx context.Context, f *framework.Framework) { priorityClasses := map[string]int32{ expendablePriorityClassName: -15, highPriorityClassName: 1000, } for className, priority := range priorityClasses { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority}, metav1.CreateOptions{}) if err != nil { klog.Errorf("Error creating priority class: %v", err) } diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index ee0d0b3fd9b..bd93efcfdfb 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -77,7 +77,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut deployment: monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue), hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs), } - tc.Run() + tc.Run(ctx) }) ginkgo.It("should scale up with two metrics", func(ctx context.Context) { @@ -112,7 +112,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut deployment: monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs), } - tc.Run() + tc.Run(ctx) }) ginkgo.It("should scale down with Prometheus", func(ctx context.Context) { @@ -131,7 +131,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut deployment: monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue), hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs), } - tc.Run() + tc.Run(ctx) }) }) @@ -154,7 +154,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue), hpa: hpa("custom-metrics-objects-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs), } - tc.Run() + tc.Run(ctx) }) ginkgo.It("should scale down to 0", func(ctx context.Context) { @@ -175,7 +175,7 @@ var _ = 
SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue), hpa: hpa("custom-metrics-objects-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 0, 3, metricSpecs), } - tc.Run() + tc.Run(ctx) }) }) @@ -201,7 +201,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target", metricValue), hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs), } - tc.Run() + tc.Run(ctx) }) ginkgo.It("should scale down with target average value", func(ctx context.Context) { @@ -225,7 +225,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target_average", externalMetricValue), hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs), } - tc.Run() + tc.Run(ctx) }) ginkgo.It("should scale up with two metrics", func(ctx context.Context) { @@ -266,7 +266,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs), } - tc.Run() + tc.Run(ctx) }) }) @@ -297,7 +297,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut scaledReplicas: 3, deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)} - tc.Run() + tc.Run(ctx) }) ginkgo.It("should scale up when one metric is missing (Resource and Object metrics)", func(ctx context.Context) { @@ -317,7 +317,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0), pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue), hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)} - tc.Run() + tc.Run(ctx) }) ginkgo.It("should not scale down when one metric is missing (Container Resource and External Metrics)", func(ctx context.Context) { @@ -347,7 +347,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut verifyStability: true, deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)} - tc.Run() + tc.Run(ctx) }) ginkgo.It("should not scale down when one metric is missing (Pod and Object Metrics)", func(ctx context.Context) { @@ -374,7 +374,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut verifyStability: true, deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, 
int32(initialReplicas), containers), hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)} - tc.Run() + tc.Run(ctx) }) }) @@ -393,10 +393,9 @@ type CustomMetricTestCase struct { } // Run starts test case. -func (tc *CustomMetricTestCase) Run() { +func (tc *CustomMetricTestCase) Run(ctx context.Context) { projectID := framework.TestContext.CloudConfig.ProjectID - ctx := context.Background() client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) if err != nil { framework.Failf("Failed to initialize gcm default client, %v", err) @@ -433,38 +432,38 @@ func (tc *CustomMetricTestCase) Run() { } // Run application that exports the metric - err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod) + err = createDeploymentToScale(ctx, tc.framework, tc.kubeClient, tc.deployment, tc.pod) if err != nil { framework.Failf("Failed to create stackdriver-exporter pod: %v", err) } ginkgo.DeferCleanup(cleanupDeploymentsToScale, tc.framework, tc.kubeClient, tc.deployment, tc.pod) // Wait for the deployment to run - waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas) + waitForReplicas(ctx, tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas) // Autoscale the deployment - _, err = tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(context.TODO(), tc.hpa, metav1.CreateOptions{}) + _, err = tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(ctx, tc.hpa, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create HPA: %v", err) } ginkgo.DeferCleanup(framework.IgnoreNotFound(tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete), tc.hpa.ObjectMeta.Name, metav1.DeleteOptions{}) - waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas) + waitForReplicas(ctx, tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas) if tc.verifyStability { - ensureDesiredReplicasInRange(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, tc.scaledReplicas, tc.scaledReplicas, 10*time.Minute) + ensureDesiredReplicasInRange(ctx, tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, tc.scaledReplicas, tc.scaledReplicas, 10*time.Minute) } } -func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error { +func createDeploymentToScale(ctx context.Context, f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error { if deployment != nil { - _, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(context.TODO(), deployment, metav1.CreateOptions{}) + _, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(ctx, deployment, metav1.CreateOptions{}) if err != nil { return err } } if pod != nil { - _, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return err } @@ -472,12 +471,12 @@ func createDeploymentToScale(f *framework.Framework, cs 
clientset.Interface, dep return nil } -func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) { +func cleanupDeploymentsToScale(ctx context.Context, f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) { if deployment != nil { - _ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), deployment.ObjectMeta.Name, metav1.DeleteOptions{}) + _ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(ctx, deployment.ObjectMeta.Name, metav1.DeleteOptions{}) } if pod != nil { - _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), pod.ObjectMeta.Name, metav1.DeleteOptions{}) + _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}) } } @@ -598,10 +597,10 @@ func hpa(name, namespace, deploymentName string, minReplicas, maxReplicas int32, } } -func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) { +func waitForReplicas(ctx context.Context, deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) { interval := 20 * time.Second - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - deployment, err := cs.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get replication controller %s: %v", deployment, err) } @@ -614,10 +613,10 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t } } -func ensureDesiredReplicasInRange(deploymentName, namespace string, cs clientset.Interface, minDesiredReplicas, maxDesiredReplicas int, timeout time.Duration) { +func ensureDesiredReplicasInRange(ctx context.Context, deploymentName, namespace string, cs clientset.Interface, minDesiredReplicas, maxDesiredReplicas int, timeout time.Duration) { interval := 60 * time.Second - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - deployment, err := cs.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get replication controller %s: %v", deployment, err) } diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index bbe789de626..2165af35154 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -56,19 +56,19 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { var DNSParams2 DNSParamsLinear var DNSParams3 DNSParamsLinear - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") c = f.ClientSet - nodes, err := e2enode.GetReadySchedulableNodes(c) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) nodeCount := len(nodes.Items) ginkgo.By("Collecting original replicas count and DNS scaling params") - originDNSReplicasCount, err = getDNSReplicas(c) + originDNSReplicasCount, err = getDNSReplicas(ctx, 
c) framework.ExpectNoError(err) - pcm, err := fetchDNSScalingConfigMap(c) + pcm, err := fetchDNSScalingConfigMap(ctx, c) framework.ExpectNoError(err) previousParams = pcm.Data @@ -105,25 +105,25 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { // This test is separated because it is slow and need to run serially. // Will take around 5 minutes to run on a 4 nodes cluster. ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func(ctx context.Context) { - numNodes, err := e2enode.TotalRegistered(c) + numNodes, err := e2enode.TotalRegistered(ctx, c) framework.ExpectNoError(err) ginkgo.By("Replace the dns autoscaling parameters with testing parameters") - err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) + err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) framework.ExpectNoError(err) defer func() { ginkgo.By("Restoring initial dns autoscaling parameters") - err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams)) + err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(previousParams)) framework.ExpectNoError(err) ginkgo.By("Wait for number of running and ready kube-dns pods recover") label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName})) - _, err := e2epod.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout) + _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout) framework.ExpectNoError(err) }() ginkgo.By("Wait for kube-dns scaled to expected number") - getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams1) - err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) + getExpectReplicasLinear := getExpectReplicasFuncLinear(ctx, c, &DNSParams1) + err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout) framework.ExpectNoError(err) originalSizes := make(map[string]int) @@ -140,86 +140,86 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { increasedSizes[key] = val + 1 } setMigSizes(increasedSizes) - err = WaitForClusterSizeFunc(c, + err = WaitForClusterSizeFunc(ctx, c, func(size int) bool { return size == numNodes+len(originalSizes) }, scaleUpTimeout) framework.ExpectNoError(err) ginkgo.By("Wait for kube-dns scaled to expected number") - getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams1) - err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) + getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams1) + err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout) framework.ExpectNoError(err) ginkgo.By("Replace the dns autoscaling parameters with another testing parameters") - err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams3))) + err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams3))) framework.ExpectNoError(err) ginkgo.By("Wait for kube-dns scaled to expected number") - getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams3) - err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) + getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams3) + err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout) 
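The mechanical change in helpers like getDNSReplicas and fetchDNSScalingConfigMap is that the caller's ctx becomes the first parameter and is passed straight into client-go, whose typed clients already take a context on Get/List/Update/Delete. A small runnable sketch using the fake clientset is below; the deploymentReplicas helper and the object names are illustrative only, not the real kube-dns-autoscaler objects.

package main

import (
	"context"
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// deploymentReplicas mirrors the shape of getDNSReplicas after the patch: the
// caller's ctx is the first parameter and is handed directly to client-go, so
// an aborted test cancels the API call instead of leaving it running.
func deploymentReplicas(ctx context.Context, c kubernetes.Interface, namespace, name string) (int32, error) {
	d, err := c.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	if d.Spec.Replicas == nil {
		return 1, nil // a nil replicas field defaults to 1
	}
	return *d.Spec.Replicas, nil
}

func main() {
	replicas := int32(2)
	// A fake clientset pre-loaded with one Deployment, just to make the
	// sketch runnable without a cluster.
	client := fake.NewSimpleClientset(&appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "dns-autoscaler-target", Namespace: "kube-system"},
		Spec:       appsv1.DeploymentSpec{Replicas: &replicas},
	})

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	n, err := deploymentReplicas(ctx, client, "kube-system", "dns-autoscaler-target")
	fmt.Println(n, err)
}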
framework.ExpectNoError(err) ginkgo.By("Restoring cluster size") setMigSizes(originalSizes) - err = e2enode.WaitForReadyNodes(c, numNodes, scaleDownTimeout) + err = e2enode.WaitForReadyNodes(ctx, c, numNodes, scaleDownTimeout) framework.ExpectNoError(err) ginkgo.By("Wait for kube-dns scaled to expected number") - err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) + err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout) framework.ExpectNoError(err) }) ginkgo.It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func(ctx context.Context) { ginkgo.By("Replace the dns autoscaling parameters with testing parameters") - err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) + err := updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) framework.ExpectNoError(err) defer func() { ginkgo.By("Restoring initial dns autoscaling parameters") - err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams)) + err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(previousParams)) framework.ExpectNoError(err) }() ginkgo.By("Wait for kube-dns scaled to expected number") - getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams1) - err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) + getExpectReplicasLinear := getExpectReplicasFuncLinear(ctx, c, &DNSParams1) + err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout) framework.ExpectNoError(err) ginkgo.By("--- Scenario: should scale kube-dns based on changed parameters ---") ginkgo.By("Replace the dns autoscaling parameters with another testing parameters") - err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams3))) + err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams3))) framework.ExpectNoError(err) ginkgo.By("Wait for kube-dns scaled to expected number") - getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams3) - err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) + getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams3) + err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout) framework.ExpectNoError(err) ginkgo.By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---") ginkgo.By("Delete the ConfigMap for autoscaler") - err = deleteDNSScalingConfigMap(c) + err = deleteDNSScalingConfigMap(ctx, c) framework.ExpectNoError(err) ginkgo.By("Wait for the ConfigMap got re-created") - _, err = waitForDNSConfigMapCreated(c, DNSdefaultTimeout) + _, err = waitForDNSConfigMapCreated(ctx, c, DNSdefaultTimeout) framework.ExpectNoError(err) ginkgo.By("Replace the dns autoscaling parameters with another testing parameters") - err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams2))) + err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams2))) framework.ExpectNoError(err) ginkgo.By("Wait for kube-dns scaled to expected number") - getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams2) - err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) + getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams2) + err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, 
DNSdefaultTimeout) framework.ExpectNoError(err) ginkgo.By("--- Scenario: should recover after autoscaler pod got deleted ---") ginkgo.By("Delete the autoscaler pod for kube-dns") - err = deleteDNSAutoscalerPod(c) + err = deleteDNSAutoscalerPod(ctx, c) framework.ExpectNoError(err) ginkgo.By("Replace the dns autoscaling parameters with another testing parameters") - err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) + err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) framework.ExpectNoError(err) ginkgo.By("Wait for kube-dns scaled to expected number") - getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams1) - err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) + getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams1) + err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout) framework.ExpectNoError(err) }) }) @@ -234,11 +234,11 @@ type DNSParamsLinear struct { type getExpectReplicasFunc func(c clientset.Interface) int -func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc { +func getExpectReplicasFuncLinear(ctx context.Context, c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc { return func(c clientset.Interface) int { var replicasFromNodes float64 var replicasFromCores float64 - nodes, err := e2enode.GetReadyNodesIncludingTainted(c) + nodes, err := e2enode.GetReadyNodesIncludingTainted(ctx, c) framework.ExpectNoError(err) if params.nodesPerReplica > 0 { replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica) @@ -260,16 +260,16 @@ func getSchedulableCores(nodes []v1.Node) int64 { return sc.Value() } -func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { - cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), DNSAutoscalerLabelName, metav1.GetOptions{}) +func fetchDNSScalingConfigMap(ctx context.Context, c clientset.Interface) (*v1.ConfigMap, error) { + cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, DNSAutoscalerLabelName, metav1.GetOptions{}) if err != nil { return nil, err } return cm, nil } -func deleteDNSScalingConfigMap(c clientset.Interface) error { - if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), DNSAutoscalerLabelName, metav1.DeleteOptions{}); err != nil { +func deleteDNSScalingConfigMap(ctx context.Context, c clientset.Interface) error { + if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(ctx, DNSAutoscalerLabelName, metav1.DeleteOptions{}); err != nil { return err } framework.Logf("DNS autoscaling ConfigMap deleted.") @@ -294,8 +294,8 @@ func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap { return &configMap } -func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error { - _, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap, metav1.UpdateOptions{}) +func updateDNSScalingConfigMap(ctx context.Context, c clientset.Interface, configMap *v1.ConfigMap) error { + _, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(ctx, configMap, metav1.UpdateOptions{}) if err != nil { return err } @@ -303,10 +303,10 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e return nil } -func getDNSReplicas(c clientset.Interface) (int, error) { +func getDNSReplicas(ctx context.Context, c clientset.Interface) (int, 
error) { label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - deployments, err := c.AppsV1().Deployments(metav1.NamespaceSystem).List(context.TODO(), listOpts) + deployments, err := c.AppsV1().Deployments(metav1.NamespaceSystem).List(ctx, listOpts) if err != nil { return 0, err } @@ -318,10 +318,10 @@ func getDNSReplicas(c clientset.Interface) (int, error) { return int(*(deployment.Spec.Replicas)), nil } -func deleteDNSAutoscalerPod(c clientset.Interface) error { +func deleteDNSAutoscalerPod(ctx context.Context, c clientset.Interface) error { label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, listOpts) if err != nil { return err } @@ -330,19 +330,19 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error { } podName := pods.Items[0].Name - if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), podName, metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(ctx, podName, metav1.DeleteOptions{}); err != nil { return err } framework.Logf("DNS autoscaling pod %v deleted.", podName) return nil } -func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) { +func waitForDNSReplicasSatisfied(ctx context.Context, c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) { var current int var expected int framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout) condition := func() (bool, error) { - current, err = getDNSReplicas(c) + current, err = getDNSReplicas(ctx, c) if err != nil { return false, err } @@ -361,10 +361,10 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep return nil } -func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) { +func waitForDNSConfigMapCreated(ctx context.Context, c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) { framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout) condition := func() (bool, error) { - configMap, err = fetchDNSScalingConfigMap(c) + configMap, err = fetchDNSScalingConfigMap(ctx, c) if err != nil { return false, nil } diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go index 9906f26c284..2a357594bc1 100644 --- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go +++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go @@ -48,41 +48,41 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { - scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) + scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) }) ginkgo.It(titleDown+titleAverageUtilization, func(ctx context.Context) { - scaleDown("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) + 
scaleDown(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) }) ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { - scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, false, f) + scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, false, f) }) }) ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { - scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f) + scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f) }) ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { - scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, f) + scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, f) }) }) ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() { ginkgo.It(titleUp, func(ctx context.Context) { - scaleUp("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) + scaleUp(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) }) ginkgo.It(titleDown, func(ctx context.Context) { - scaleDown("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) + scaleDown(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) }) }) // These tests take ~20 minutes each. ginkgo.Describe("[Serial] [Slow] ReplicationController", func() { ginkgo.It(titleUp+" and verify decision stability", func(ctx context.Context) { - scaleUp("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) + scaleUp(ctx, "rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) }) ginkgo.It(titleDown+" and verify decision stability", func(ctx context.Context) { - scaleDown("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) + scaleDown(ctx, "rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) }) }) @@ -99,7 +99,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C resourceType: cpuResource, metricTargetType: utilizationMetricType, } - st.run("rc-light", e2eautoscaling.KindRC, f) + st.run(ctx, "rc-light", e2eautoscaling.KindRC, f) }) ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func(ctx context.Context) { st := &HPAScaleTest{ @@ -113,19 +113,19 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C resourceType: cpuResource, metricTargetType: utilizationMetricType, } - st.run("rc-light", e2eautoscaling.KindRC, f) + st.run(ctx, "rc-light", e2eautoscaling.KindRC, f) }) }) ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() { // ContainerResource CPU autoscaling on idle sidecar ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func(ctx context.Context) { - scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) + scaleOnIdleSideCar(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) }) // ContainerResource CPU autoscaling on busy sidecar ginkgo.It("Should not scale up on a busy sidecar with an idle application", func(ctx context.Context) { - 
doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, true, f) + doNotScaleOnBusySidecar(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, true, f) }) }) @@ -142,7 +142,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C resourceType: cpuResource, metricTargetType: utilizationMetricType, } - scaleTest.run("foo-crd", e2eautoscaling.KindCRD, f) + scaleTest.run(ctx, "foo-crd", e2eautoscaling.KindCRD, f) }) }) }) @@ -153,19 +153,19 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: M ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { - scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f) + scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f) }) ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { - scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, false, f) + scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, false, f) }) }) ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { - scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f) + scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f) }) ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { - scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, f) + scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, f) }) }) }) @@ -194,7 +194,7 @@ type HPAScaleTest struct { // The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts. // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to. // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes. 
-func (st *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) { +func (st *HPAScaleTest) run(ctx context.Context, name string, kind schema.GroupVersionKind, f *framework.Framework) { const timeToWait = 15 * time.Minute initCPUTotal, initMemTotal := 0, 0 if st.resourceType == cpuResource { @@ -202,26 +202,26 @@ func (st *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framew } else if st.resourceType == memResource { initMemTotal = st.initMemTotal } - rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods) + hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(ctx, rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods) ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, rc, hpa.Name) - rc.WaitForReplicas(st.firstScale, timeToWait) + rc.WaitForReplicas(ctx, st.firstScale, timeToWait) if st.firstScaleStasis > 0 { - rc.EnsureDesiredReplicasInRange(st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name) } if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 { rc.ConsumeCPU(st.cpuBurst) - rc.WaitForReplicas(int(st.secondScale), timeToWait) + rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait) } if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 { rc.ConsumeMem(st.memBurst) - rc.WaitForReplicas(int(st.secondScale), timeToWait) + rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait) } } -func scaleUp(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { +func scaleUp(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { stasis := 0 * time.Minute if checkStability { stasis = 10 * time.Minute @@ -247,10 +247,10 @@ func scaleUp(name string, kind schema.GroupVersionKind, resourceType v1.Resource st.initMemTotal = 250 st.memBurst = 700 } - st.run(name, kind, f) + st.run(ctx, name, kind, f) } -func scaleDown(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { +func scaleDown(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { stasis := 0 * time.Minute if checkStability { stasis = 10 * time.Minute @@ -277,7 +277,7 @@ func scaleDown(name string, kind schema.GroupVersionKind, resourceType v1.Resour st.initMemTotal = 325 st.memBurst = 10 } - st.run(name, kind, f) + st.run(ctx, name, kind, f) } type HPAContainerResourceScaleTest struct { @@ -302,7 +302,7 @@ type 
HPAContainerResourceScaleTest struct { metricTargetType autoscalingv2.MetricTargetType } -func (st *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) { +func (st *HPAContainerResourceScaleTest) run(ctx context.Context, name string, kind schema.GroupVersionKind, f *framework.Framework) { const timeToWait = 15 * time.Minute initCPUTotal, initMemTotal := 0, 0 if st.resourceType == cpuResource { @@ -310,32 +310,32 @@ func (st *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersi } else if st.resourceType == memResource { initMemTotal = st.initMemTotal } - rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType) + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods) + hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(ctx, rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods) ginkgo.DeferCleanup(e2eautoscaling.DeleteContainerResourceHPA, rc, hpa.Name) if st.noScale { if st.noScaleStasis > 0 { - rc.EnsureDesiredReplicasInRange(st.initPods, st.initPods, st.noScaleStasis, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, st.initPods, st.initPods, st.noScaleStasis, hpa.Name) } } else { - rc.WaitForReplicas(st.firstScale, timeToWait) + rc.WaitForReplicas(ctx, st.firstScale, timeToWait) if st.firstScaleStasis > 0 { - rc.EnsureDesiredReplicasInRange(st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name) } if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 { rc.ConsumeCPU(st.cpuBurst) - rc.WaitForReplicas(int(st.secondScale), timeToWait) + rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait) } if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 { rc.ConsumeMem(st.memBurst) - rc.WaitForReplicas(int(st.secondScale), timeToWait) + rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait) } } } -func scaleUpContainerResource(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, f *framework.Framework) { +func scaleUpContainerResource(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, f *framework.Framework) { st := &HPAContainerResourceScaleTest{ initPods: 1, perContainerCPURequest: 500, @@ -359,10 +359,10 @@ func scaleUpContainerResource(name string, kind schema.GroupVersionKind, resourc st.initMemTotal = 250 st.memBurst = 700 } - st.run(name, kind, f) + st.run(ctx, name, kind, f) } -func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { +func scaleOnIdleSideCar(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType 
autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { // Scale up on a busy application with an idle sidecar container stasis := 0 * time.Minute if checkStability { @@ -384,10 +384,10 @@ func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, resourceType sidecarStatus: e2eautoscaling.Enable, sidecarType: e2eautoscaling.Idle, } - st.run(name, kind, f) + st.run(ctx, name, kind, f) } -func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { +func doNotScaleOnBusySidecar(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { // Do not scale up on a busy sidecar with an idle application stasis := 0 * time.Minute if checkStability { @@ -408,7 +408,7 @@ func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, resource noScale: true, noScaleStasis: stasis, } - st.run(name, kind, f) + st.run(ctx, name, kind, f) } func getTargetValueByType(averageValueTarget, averageUtilizationTarget int, targetType autoscalingv2.MetricTargetType) int32 { diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go b/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go index 0ebb9f67d6a..4dec6a5f228 100644 --- a/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go +++ b/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go @@ -61,14 +61,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n upScaleStabilization := 0 * time.Minute downScaleStabilization := 1 * time.Minute - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 1, 5, e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), ) @@ -78,12 +78,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n // for stabilization logic before lowering the consumption ginkgo.By("triggering scale up to record a recommendation") rc.ConsumeCPU(3 * usageForSingleReplica) - rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) + rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) ginkgo.By("triggering scale down by lowering consumption") rc.ConsumeCPU(2 * usageForSingleReplica) waitStart := time.Now() - rc.WaitForReplicas(2, downScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) + rc.WaitForReplicas(ctx, 2, downScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) timeWaited := time.Now().Sub(waitStart) ginkgo.By("verifying time waited for a scale down") @@ -102,14 +102,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n upScaleStabilization := 3 * time.Minute downScaleStabilization := 0 * time.Minute - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, 
e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), ) @@ -119,12 +119,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n // for stabilization logic before increasing the consumption ginkgo.By("triggering scale down to record a recommendation") rc.ConsumeCPU(1 * usageForSingleReplica) - rc.WaitForReplicas(1, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) + rc.WaitForReplicas(ctx, 1, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) ginkgo.By("triggering scale up by increasing consumption") rc.ConsumeCPU(3 * usageForSingleReplica) waitStart := time.Now() - rc.WaitForReplicas(3, upScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) + rc.WaitForReplicas(ctx, 3, upScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) timeWaited := time.Now().Sub(waitStart) ginkgo.By("verifying time waited for a scale up") @@ -141,14 +141,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n initPods := 1 initCPUUsageTotal := initPods * usageForSingleReplica - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection), ) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name) @@ -159,7 +159,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n rc.ConsumeCPU(8 * usageForSingleReplica) waitStart := time.Now() - rc.EnsureDesiredReplicasInRange(initPods, initPods, waitDeadline, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, initPods, initPods, waitDeadline, hpa.Name) timeWaited := time.Now().Sub(waitStart) ginkgo.By("verifying time waited for a scale up") @@ -167,7 +167,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline) ginkgo.By("verifying number of replicas") - replicas := rc.GetReplicas() + replicas := rc.GetReplicas(ctx) framework.ExpectEqual(replicas == initPods, true, "had %s replicas, still have %s replicas after time deadline", initPods, replicas) }) @@ -176,14 +176,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n initPods := 3 initCPUUsageTotal := initPods * usageForSingleReplica - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) 
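// The cleanup registered on the next line follows the same pattern: when the function handed to
// ginkgo.DeferCleanup takes a context.Context (or ginkgo.SpecContext) as its first argument,
// Ginkgo v2 can supply the cleanup node's context itself, so cleanup also stops promptly on abort
// (hedged assumption about Ginkgo's behaviour; deleteExamplePod is a placeholder, not an
// identifier from this patch):
//
//	func deleteExamplePod(ctx context.Context, c clientset.Interface, ns, name string) error {
//		return c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
//	}
//
//	// inside a spec body; the remaining arguments are forwarded to the helper:
//	ginkgo.DeferCleanup(deleteExamplePod, f.ClientSet, f.Namespace.Name, "example-pod")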
ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection), ) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name) @@ -195,7 +195,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n rc.ConsumeCPU(1 * usageForSingleReplica) waitStart := time.Now() - rc.EnsureDesiredReplicasInRange(initPods, initPods, waitDeadline, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, initPods, initPods, waitDeadline, hpa.Name) timeWaited := time.Now().Sub(waitStart) ginkgo.By("verifying time waited for a scale down") @@ -203,7 +203,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline) ginkgo.By("verifying number of replicas") - replicas := rc.GetReplicas() + replicas := rc.GetReplicas(ctx) framework.ExpectEqual(replicas == initPods, true, "had %s replicas, still have %s replicas after time deadline", initPods, replicas) }) @@ -221,14 +221,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n limitWindowLength := 1 * time.Minute podsLimitPerMinute := 1 - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())), ) @@ -238,11 +238,11 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n rc.ConsumeCPU(3 * usageForSingleReplica) waitStart := time.Now() - rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) + rc.WaitForReplicas(ctx, 2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) timeWaitedFor2 := time.Now().Sub(waitStart) waitStart = time.Now() - rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) + rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) timeWaitedFor3 := time.Now().Sub(waitStart) ginkgo.By("verifying time waited for a scale up to 2 replicas") @@ -263,14 +263,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n limitWindowLength := 1 * time.Minute podsLimitPerMinute := 1 - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 1, 10, 
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())), ) @@ -280,11 +280,11 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n rc.ConsumeCPU(1 * usageForSingleReplica) waitStart := time.Now() - rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) + rc.WaitForReplicas(ctx, 2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) timeWaitedFor2 := time.Now().Sub(waitStart) waitStart = time.Now() - rc.WaitForReplicas(1, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) + rc.WaitForReplicas(ctx, 1, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) timeWaitedFor1 := time.Now().Sub(waitStart) ginkgo.By("verifying time waited for a scale down to 2 replicas") @@ -311,14 +311,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n limitWindowLength := 1 * time.Minute percentageLimitPerMinute := 50 - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())), ) @@ -328,12 +328,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n rc.ConsumeCPU(8 * usageForSingleReplica) waitStart := time.Now() - rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) + rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) timeWaitedFor3 := time.Now().Sub(waitStart) waitStart = time.Now() // Scale up limited by percentage takes ceiling, so new replicas number is ceil(3 * 1.5) = ceil(4.5) = 5 - rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) + rc.WaitForReplicas(ctx, 5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) timeWaitedFor5 := time.Now().Sub(waitStart) ginkgo.By("verifying time waited for a scale up to 3 replicas") @@ -354,14 +354,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n limitWindowLength := 1 * time.Minute percentageLimitPerMinute := 25 - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())), ) @@ -371,12 +371,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n rc.ConsumeCPU(1 * usageForSingleReplica) waitStart := time.Now() - 
rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) + rc.WaitForReplicas(ctx, 5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) timeWaitedFor5 := time.Now().Sub(waitStart) waitStart = time.Now() // Scale down limited by percentage takes floor, so new replicas number is floor(5 * 0.75) = floor(3.75) = 3 - rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) + rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) timeWaitedFor3 := time.Now().Sub(waitStart) ginkgo.By("verifying time waited for a scale down to 5 replicas") @@ -401,14 +401,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n upScaleStabilization := 3 * time.Minute downScaleStabilization := 3 * time.Minute - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, ) ginkgo.DeferCleanup(rc.CleanUp) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 2, 5, e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), ) @@ -419,12 +419,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n waitDeadline := upScaleStabilization ginkgo.By("verifying number of replicas stay in desired range within stabilisation window") - rc.EnsureDesiredReplicasInRange(2, 2, waitDeadline, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, 2, 2, waitDeadline, hpa.Name) ginkgo.By("waiting for replicas to scale up after stabilisation window passed") waitStart := time.Now() waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer - rc.WaitForReplicas(4, waitDeadline) + rc.WaitForReplicas(ctx, 4, waitDeadline) timeWaited := time.Now().Sub(waitStart) framework.Logf("time waited for scale up: %s", timeWaited) framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) @@ -434,12 +434,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n waitDeadline = downScaleStabilization ginkgo.By("verifying number of replicas stay in desired range within stabilisation window") - rc.EnsureDesiredReplicasInRange(4, 4, waitDeadline, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, 4, 4, waitDeadline, hpa.Name) ginkgo.By("waiting for replicas to scale down after stabilisation window passed") waitStart = time.Now() waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer - rc.WaitForReplicas(2, waitDeadline) + rc.WaitForReplicas(ctx, 2, waitDeadline) timeWaited = time.Now().Sub(waitStart) framework.Logf("time waited for scale down: %s", timeWaited) framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) @@ -453,7 +453,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n limitWindowLength := 2 * time.Minute podsLimitPerMinute := 1 - rc := e2eautoscaling.NewDynamicResourceConsumer( + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, f.ClientSet, f.ScalesGetter, 
e2eautoscaling.Disable, e2eautoscaling.Idle, @@ -462,7 +462,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())) scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds())) - hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( + hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx, rc, int32(targetCPUUtilizationPercent), 2, 5, e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule), ) @@ -473,12 +473,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n waitDeadline := limitWindowLength ginkgo.By("verifying number of replicas stay in desired range with pod limit rate") - rc.EnsureDesiredReplicasInRange(2, 3, waitDeadline, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, 2, 3, waitDeadline, hpa.Name) ginkgo.By("waiting for replicas to scale up") waitStart := time.Now() waitDeadline = limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer - rc.WaitForReplicas(4, waitDeadline) + rc.WaitForReplicas(ctx, 4, waitDeadline) timeWaited := time.Now().Sub(waitStart) framework.Logf("time waited for scale up: %s", timeWaited) framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) @@ -488,12 +488,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n ginkgo.By("verifying number of replicas stay in desired range within stabilisation window") waitDeadline = downScaleStabilization - rc.EnsureDesiredReplicasInRange(4, 4, waitDeadline, hpa.Name) + rc.EnsureDesiredReplicasInRange(ctx, 4, 4, waitDeadline, hpa.Name) ginkgo.By("waiting for replicas to scale down after stabilisation window passed") waitStart = time.Now() waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer - rc.WaitForReplicas(2, waitDeadline) + rc.WaitForReplicas(ctx, 2, waitDeadline) timeWaited = time.Now().Sub(waitStart) framework.Logf("time waited for scale down: %s", timeWaited) framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) diff --git a/test/e2e/chaosmonkey/chaosmonkey.go b/test/e2e/chaosmonkey/chaosmonkey.go index 5c35bb4ca62..694cf7d43d7 100644 --- a/test/e2e/chaosmonkey/chaosmonkey.go +++ b/test/e2e/chaosmonkey/chaosmonkey.go @@ -17,20 +17,21 @@ limitations under the License. package chaosmonkey import ( + "context" "fmt" "github.com/onsi/ginkgo/v2" ) // Disruption is the type to construct a Chaosmonkey with; see Do for more information. -type Disruption func() +type Disruption func(ctx context.Context) // Test is the type to register with a Chaosmonkey. A test will run asynchronously across the // Chaosmonkey's Disruption. A Test takes a Semaphore as an argument. It should call sem.Ready() // once it's ready for the disruption to start and should then wait until sem.StopCh (which is a // <-chan struct{}) is closed, which signals that the disruption is over. It should then clean up // and return. See Do and Semaphore for more information. -type Test func(sem *Semaphore) +type Test func(ctx context.Context, sem *Semaphore) // Interface can be implemented if you prefer to define tests without dealing with a Semaphore. 
You // may define a struct that implements Interface's three methods (Setup, Test, and Teardown) and @@ -66,7 +67,7 @@ func (cm *Chaosmonkey) Register(test Test) { // call Setup, Test, and Teardown properly. Test can tell that the Disruption is finished when // stopCh is closed. func (cm *Chaosmonkey) RegisterInterface(in Interface) { - cm.Register(func(sem *Semaphore) { + cm.Register(func(ctx context.Context, sem *Semaphore) { in.Setup() sem.Ready() in.Test(sem.StopCh) @@ -79,7 +80,7 @@ func (cm *Chaosmonkey) RegisterInterface(in Interface) { // waits for each test to signal that it is ready by calling sem.Ready(). Do will then do the // Disruption, and when it's complete, close sem.StopCh to signal to the registered Tests that the // Disruption is over, and wait for all Tests to return. -func (cm *Chaosmonkey) Do() { +func (cm *Chaosmonkey) Do(ctx context.Context) { sems := []*Semaphore{} // All semaphores have the same StopCh. stopCh := make(chan struct{}) @@ -91,7 +92,7 @@ func (cm *Chaosmonkey) Do() { go func() { defer ginkgo.GinkgoRecover() defer sem.done() - test(sem) + test(ctx, sem) }() } @@ -112,7 +113,7 @@ func (cm *Chaosmonkey) Do() { }() fmt.Println("Starting disruption") - cm.disruption() + cm.disruption(ctx) fmt.Println("Disruption complete; stopping async validations") } diff --git a/test/e2e/chaosmonkey/chaosmonkey_test.go b/test/e2e/chaosmonkey/chaosmonkey_test.go index f5a9dcf57a3..d5a981573d6 100644 --- a/test/e2e/chaosmonkey/chaosmonkey_test.go +++ b/test/e2e/chaosmonkey/chaosmonkey_test.go @@ -17,27 +17,28 @@ limitations under the License. package chaosmonkey import ( + "context" "sync/atomic" "testing" ) func TestDoWithPanic(t *testing.T) { var counter int64 - cm := New(func() {}) + cm := New(func(ctx context.Context) {}) tests := []Test{ // No panic - func(sem *Semaphore) { + func(ctx context.Context, sem *Semaphore) { defer atomic.AddInt64(&counter, 1) sem.Ready() }, // Panic after sem.Ready() - func(sem *Semaphore) { + func(ctx context.Context, sem *Semaphore) { defer atomic.AddInt64(&counter, 1) sem.Ready() panic("Panic after calling sem.Ready()") }, // Panic before sem.Ready() - func(sem *Semaphore) { + func(ctx context.Context, sem *Semaphore) { defer atomic.AddInt64(&counter, 1) panic("Panic before calling sem.Ready()") }, @@ -45,7 +46,7 @@ func TestDoWithPanic(t *testing.T) { for _, test := range tests { cm.Register(test) } - cm.Do() + cm.Do(context.Background()) // Check that all funcs in tests were called. if int(counter) != len(tests) { t.Errorf("Expected counter to be %v, but it was %v", len(tests), counter) diff --git a/test/e2e/cloud/gcp/addon_update.go b/test/e2e/cloud/gcp/addon_update.go index 38c3ed15136..63cf281a354 100644 --- a/test/e2e/cloud/gcp/addon_update.go +++ b/test/e2e/cloud/gcp/addon_update.go @@ -304,13 +304,13 @@ var _ = SIGDescribe("Addon update", func() { // Delete the "ensure exist class" addon at the end. 
defer func() { framework.Logf("Cleaning up ensure exist class addon.") - err := f.ClientSet.CoreV1().Services(addonNsName).Delete(context.TODO(), "addon-ensure-exists-test", metav1.DeleteOptions{}) + err := f.ClientSet.CoreV1().Services(addonNsName).Delete(ctx, "addon-ensure-exists-test", metav1.DeleteOptions{}) framework.ExpectNoError(err) }() - waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true) - waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-deprecated-label-test", true) - waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true) + waitForReplicationControllerInAddonTest(ctx, f.ClientSet, addonNsName, "addon-reconcile-test", true) + waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-deprecated-label-test", true) + waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-ensure-exists-test", true) // Replace the manifests with new contents. ginkgo.By("update manifests") @@ -320,52 +320,52 @@ var _ = SIGDescribe("Addon update", func() { // Wait for updated addons to have the new added label. reconcileSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-reconcile-test"})) - waitForReplicationControllerwithSelectorInAddonTest(f.ClientSet, addonNsName, true, reconcileSelector) + waitForReplicationControllerwithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, true, reconcileSelector) deprecatedLabelSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-deprecated-label-test"})) - waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, true, deprecatedLabelSelector) + waitForServicewithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, true, deprecatedLabelSelector) // "Ensure exist class" addon should not be updated. ensureExistSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-ensure-exists-test"})) - waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, false, ensureExistSelector) + waitForServicewithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, false, ensureExistSelector) ginkgo.By("remove manifests") sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcAddonReconcile)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonDeprecatedLabel)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonEnsureExists)) - waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", false) - waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-deprecated-label-test", false) + waitForReplicationControllerInAddonTest(ctx, f.ClientSet, addonNsName, "addon-reconcile-test", false) + waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-deprecated-label-test", false) // "Ensure exist class" addon will not be deleted when manifest is removed. - waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true) + waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-ensure-exists-test", true) ginkgo.By("verify invalid addons weren't created") - _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get(context.TODO(), "invalid-addon-test", metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get(ctx, "invalid-addon-test", metav1.GetOptions{}) framework.ExpectError(err) // Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function. 
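// The wait helpers converted below swap wait.PollImmediate for wait.PollImmediateWithContext so
// that polling stops as soon as the spec's context is cancelled instead of running to the timeout.
// A minimal sketch of that shape with a placeholder name (waitForExampleService is not an
// identifier from this patch):

func waitForExampleService(ctx context.Context, c clientset.Interface, ns, name string, interval, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
		_, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Not there yet (or a transient error): keep polling until timeout or context cancellation.
			return false, nil
		}
		return true, nil
	})
}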
}) }) -func waitForServiceInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) { - framework.ExpectNoError(e2enetwork.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) +func waitForServiceInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace, name string, exist bool) { + framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } -func waitForReplicationControllerInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) { - framework.ExpectNoError(waitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) +func waitForReplicationControllerInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace, name string, exist bool) { + framework.ExpectNoError(waitForReplicationController(ctx, c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } -func waitForServicewithSelectorInAddonTest(c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) { - framework.ExpectNoError(waitForServiceWithSelector(c, addonNamespace, selector, exist, addonTestPollInterval, addonTestPollTimeout)) +func waitForServicewithSelectorInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) { + framework.ExpectNoError(waitForServiceWithSelector(ctx, c, addonNamespace, selector, exist, addonTestPollInterval, addonTestPollTimeout)) } -func waitForReplicationControllerwithSelectorInAddonTest(c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) { - framework.ExpectNoError(waitForReplicationControllerWithSelector(c, addonNamespace, selector, exist, addonTestPollInterval, +func waitForReplicationControllerwithSelectorInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) { + framework.ExpectNoError(waitForReplicationControllerWithSelector(ctx, c, addonNamespace, selector, exist, addonTestPollInterval, addonTestPollTimeout)) } // waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) -func waitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func waitForReplicationController(ctx context.Context, c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + _, err := c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { framework.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err) return !exist, nil @@ -381,10 +381,10 @@ func waitForReplicationController(c clientset.Interface, namespace, name string, } // waitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false) -func waitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, +func waitForServiceWithSelector(ctx context.Context, c clientset.Interface, namespace string, selector labels.Selector, exist bool, 
interval, timeout time.Duration) error { - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - services, err := c.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + services, err := c.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) switch { case len(services.Items) != 0: framework.Logf("Service with %s in namespace %s found.", selector.String(), namespace) @@ -408,10 +408,10 @@ func waitForServiceWithSelector(c clientset.Interface, namespace string, selecto } // waitForReplicationControllerWithSelector waits until any RC with given selector appears (exist == true), or disappears (exist == false) -func waitForReplicationControllerWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, +func waitForReplicationControllerWithSelector(ctx context.Context, c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, timeout time.Duration) error { - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - rcs, err := c.CoreV1().ReplicationControllers(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + rcs, err := c.CoreV1().ReplicationControllers(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) switch { case len(rcs.Items) != 0: framework.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace) diff --git a/test/e2e/cloud/gcp/apps/stateful_apps.go b/test/e2e/cloud/gcp/apps/stateful_apps.go index 69b625a7047..8d166eb7260 100644 --- a/test/e2e/cloud/gcp/apps/stateful_apps.go +++ b/test/e2e/cloud/gcp/apps/stateful_apps.go @@ -43,7 +43,7 @@ var _ = SIGDescribe("stateful Upgrade [Feature:StatefulUpgrade]", func() { ginkgo.Describe("stateful upgrade", func() { ginkgo.It("should maintain a functioning cluster", func(ctx context.Context) { - e2epv.SkipIfNoDefaultStorageClass(f.ClientSet) + e2epv.SkipIfNoDefaultStorageClass(ctx, f.ClientSet) upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -52,7 +52,7 @@ var _ = SIGDescribe("stateful Upgrade [Feature:StatefulUpgrade]", func() { testSuite.TestCases = append(testSuite.TestCases, statefulUpgradeTest) upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, statefulUpgradeTest, nil, nil) - upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) + upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) }) }) }) diff --git a/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go b/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go index dea4951f0b3..f0b70665cb6 100644 --- a/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go +++ b/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go @@ -51,7 +51,7 @@ var _ = SIGDescribe("ServiceAccount admission controller migration [Feature:Boun testSuite.TestCases = append(testSuite.TestCases, serviceaccountAdmissionControllerMigrationTest) upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, serviceaccountAdmissionControllerMigrationTest, nil) - upgrades.RunUpgradeSuite(upgCtx, 
upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) + upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) }) }) }) diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go index 0cda019fc37..bddfa05759b 100644 --- a/test/e2e/cloud/gcp/cluster_upgrade.go +++ b/test/e2e/cloud/gcp/cluster_upgrade.go @@ -72,7 +72,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { testSuite.TestCases = append(testSuite.TestCases, masterUpgradeTest) upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, masterUpgradeTest, nil) - upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) + upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) }) }) @@ -86,7 +86,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { testSuite.TestCases = append(testSuite.TestCases, clusterUpgradeTest) upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, clusterUpgradeTest, nil, nil) - upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) + upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) }) }) }) @@ -106,7 +106,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() { testSuite.TestCases = append(testSuite.TestCases, clusterDowngradeTest) upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, clusterDowngradeTest, nil, nil) - upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) + upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) }) }) }) diff --git a/test/e2e/cloud/gcp/common/upgrade_mechanics.go b/test/e2e/cloud/gcp/common/upgrade_mechanics.go index 93c1fb6ef8f..763525e5c0f 100644 --- a/test/e2e/cloud/gcp/common/upgrade_mechanics.go +++ b/test/e2e/cloud/gcp/common/upgrade_mechanics.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "os" "os/exec" @@ -34,48 +35,48 @@ import ( ) // ControlPlaneUpgradeFunc returns a function that performs control plane upgrade. -func ControlPlaneUpgradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs []string) func() { - return func() { +func ControlPlaneUpgradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs []string) func(ctx context.Context) { + return func(ctx context.Context) { target := upgCtx.Versions[1].Version.String() - framework.ExpectNoError(controlPlaneUpgrade(f, target, controlPlaneExtraEnvs)) - framework.ExpectNoError(checkControlPlaneVersion(f.ClientSet, target)) + framework.ExpectNoError(controlPlaneUpgrade(ctx, f, target, controlPlaneExtraEnvs)) + framework.ExpectNoError(checkControlPlaneVersion(ctx, f.ClientSet, target)) } } // ClusterUpgradeFunc returns a function that performs full cluster upgrade (both control plane and nodes). 
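// The upgrade factories in this file change their return type from func() to func(ctx context.Context),
// so the spec that eventually invokes the returned closure can pass along the Ginkgo context. A minimal
// sketch of that shape with a placeholder name (newVersionCheck is not an identifier from this patch):

func newVersionCheck(c clientset.Interface, want string) func(ctx context.Context) {
	return func(ctx context.Context) {
		framework.ExpectNoError(checkControlPlaneVersion(ctx, c, want))
	}
}

// A caller inside a spec body would then do: check := newVersionCheck(f.ClientSet, target); check(ctx)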
-func ClusterUpgradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs, nodeExtraEnvs []string) func() { - return func() { +func ClusterUpgradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs, nodeExtraEnvs []string) func(ctx context.Context) { + return func(ctx context.Context) { target := upgCtx.Versions[1].Version.String() image := upgCtx.Versions[1].NodeImage - framework.ExpectNoError(controlPlaneUpgrade(f, target, controlPlaneExtraEnvs)) - framework.ExpectNoError(checkControlPlaneVersion(f.ClientSet, target)) - framework.ExpectNoError(nodeUpgrade(f, target, image, nodeExtraEnvs)) - framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) + framework.ExpectNoError(controlPlaneUpgrade(ctx, f, target, controlPlaneExtraEnvs)) + framework.ExpectNoError(checkControlPlaneVersion(ctx, f.ClientSet, target)) + framework.ExpectNoError(nodeUpgrade(ctx, f, target, image, nodeExtraEnvs)) + framework.ExpectNoError(checkNodesVersions(ctx, f.ClientSet, target)) } } // ClusterDowngradeFunc returns a function that performs full cluster downgrade (both nodes and control plane). -func ClusterDowngradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs, nodeExtraEnvs []string) func() { - return func() { +func ClusterDowngradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs, nodeExtraEnvs []string) func(ctx context.Context) { + return func(ctx context.Context) { target := upgCtx.Versions[1].Version.String() image := upgCtx.Versions[1].NodeImage // Yes this really is a downgrade. And nodes must downgrade first. - framework.ExpectNoError(nodeUpgrade(f, target, image, nodeExtraEnvs)) - framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) - framework.ExpectNoError(controlPlaneUpgrade(f, target, controlPlaneExtraEnvs)) - framework.ExpectNoError(checkControlPlaneVersion(f.ClientSet, target)) + framework.ExpectNoError(nodeUpgrade(ctx, f, target, image, nodeExtraEnvs)) + framework.ExpectNoError(checkNodesVersions(ctx, f.ClientSet, target)) + framework.ExpectNoError(controlPlaneUpgrade(ctx, f, target, controlPlaneExtraEnvs)) + framework.ExpectNoError(checkControlPlaneVersion(ctx, f.ClientSet, target)) } } const etcdImage = "3.4.9-1" // controlPlaneUpgrade upgrades control plane node on GCE/GKE. 
-func controlPlaneUpgrade(f *framework.Framework, v string, extraEnvs []string) error { +func controlPlaneUpgrade(ctx context.Context, f *framework.Framework, v string, extraEnvs []string) error { switch framework.TestContext.Provider { case "gce": return controlPlaneUpgradeGCE(v, extraEnvs) case "gke": - return e2eproviders.MasterUpgradeGKE(f.Namespace.Name, v) + return e2eproviders.MasterUpgradeGKE(ctx, f.Namespace.Name, v) default: return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider) } @@ -117,11 +118,11 @@ func traceRouteToControlPlane() { } // checkControlPlaneVersion validates the control plane version -func checkControlPlaneVersion(c clientset.Interface, want string) error { +func checkControlPlaneVersion(ctx context.Context, c clientset.Interface, want string) error { framework.Logf("Checking control plane version") var err error var v *version.Info - waitErr := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { + waitErr := wait.PollImmediateWithContext(ctx, 5*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) { v, err = c.Discovery().ServerVersion() if err != nil { traceRouteToControlPlane() @@ -144,21 +145,21 @@ func checkControlPlaneVersion(c clientset.Interface, want string) error { } // nodeUpgrade upgrades nodes on GCE/GKE. -func nodeUpgrade(f *framework.Framework, v string, img string, extraEnvs []string) error { +func nodeUpgrade(ctx context.Context, f *framework.Framework, v string, img string, extraEnvs []string) error { // Perform the upgrade. var err error switch framework.TestContext.Provider { case "gce": err = nodeUpgradeGCE(v, img, extraEnvs) case "gke": - err = nodeUpgradeGKE(f.Namespace.Name, v, img) + err = nodeUpgradeGKE(ctx, f.Namespace.Name, v, img) default: err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider) } if err != nil { return err } - return waitForNodesReadyAfterUpgrade(f) + return waitForNodesReadyAfterUpgrade(ctx, f) } // TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default. @@ -174,7 +175,7 @@ func nodeUpgradeGCE(rawV, img string, extraEnvs []string) error { return err } -func nodeUpgradeGKE(namespace string, v string, img string) error { +func nodeUpgradeGKE(ctx context.Context, namespace string, v string, img string) error { framework.Logf("Upgrading nodes to version %q and image %q", v, img) nps, err := nodePoolsGKE() if err != nil { @@ -202,7 +203,7 @@ func nodeUpgradeGKE(namespace string, v string, img string) error { return err } - e2enode.WaitForSSHTunnels(namespace) + e2enode.WaitForSSHTunnels(ctx, namespace) } return nil } @@ -227,25 +228,25 @@ func nodePoolsGKE() ([]string, error) { return strings.Fields(stdout), nil } -func waitForNodesReadyAfterUpgrade(f *framework.Framework) error { +func waitForNodesReadyAfterUpgrade(ctx context.Context, f *framework.Framework) error { // Wait for it to complete and validate nodes are healthy. // // TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in // GKE; the operation shouldn't return until they all are. 
- numNodes, err := e2enode.TotalRegistered(f.ClientSet) + numNodes, err := e2enode.TotalRegistered(ctx, f.ClientSet) if err != nil { return fmt.Errorf("couldn't detect number of nodes") } framework.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", framework.RestartNodeReadyAgainTimeout, numNodes) - if _, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout); err != nil { + if _, err := e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout); err != nil { return err } return nil } // checkNodesVersions validates the nodes versions -func checkNodesVersions(cs clientset.Interface, want string) error { - l, err := e2enode.GetReadySchedulableNodes(cs) +func checkNodesVersions(ctx context.Context, cs clientset.Interface, want string) error { + l, err := e2enode.GetReadySchedulableNodes(ctx, cs) if err != nil { return err } diff --git a/test/e2e/cloud/gcp/gke_node_pools.go b/test/e2e/cloud/gcp/gke_node_pools.go index e9a81005d7e..9706a51131e 100644 --- a/test/e2e/cloud/gcp/gke_node_pools.go +++ b/test/e2e/cloud/gcp/gke_node_pools.go @@ -40,11 +40,11 @@ var _ = SIGDescribe("GKE node pools [Feature:GKENodePool]", func() { ginkgo.It("should create a cluster with multiple node pools [Feature:GKENodePool]", func(ctx context.Context) { framework.Logf("Start create node pool test") - testCreateDeleteNodePool(f, "test-pool") + testCreateDeleteNodePool(ctx, f, "test-pool") }) }) -func testCreateDeleteNodePool(f *framework.Framework, poolName string) { +func testCreateDeleteNodePool(ctx context.Context, f *framework.Framework, poolName string) { framework.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster) clusterStr := fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster) @@ -67,7 +67,7 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) { framework.Logf("Node pools:\n%s", string(out)) framework.Logf("Checking that 2 nodes have the correct node pool label.") - nodeCount := nodesWithPoolLabel(f, poolName) + nodeCount := nodesWithPoolLabel(ctx, f, poolName) if nodeCount != 2 { framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount) } @@ -92,7 +92,7 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) { framework.Logf("\nNode pools:\n%s", string(out)) framework.Logf("Checking that no nodes have the deleted node pool's label.") - nodeCount = nodesWithPoolLabel(f, poolName) + nodeCount = nodesWithPoolLabel(ctx, f, poolName) if nodeCount != 0 { framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount) } @@ -101,9 +101,9 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) { // nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool" // label with the given node pool name. 
-func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
+func nodesWithPoolLabel(ctx context.Context, f *framework.Framework, poolName string) int {
 	nodeCount := 0
-	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+	nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
 	framework.ExpectNoError(err)
 	for _, node := range nodeList.Items {
 		if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
diff --git a/test/e2e/cloud/gcp/ha_master.go b/test/e2e/cloud/gcp/ha_master.go
index 987bc7409ee..017f1d5da2a 100644
--- a/test/e2e/cloud/gcp/ha_master.go
+++ b/test/e2e/cloud/gcp/ha_master.go
@@ -74,9 +74,9 @@ func removeWorkerNodes(zone string) error {
 	return nil
 }

-func verifyRCs(c clientset.Interface, ns string, names []string) {
+func verifyRCs(ctx context.Context, c clientset.Interface, ns string, names []string) {
 	for _, name := range names {
-		framework.ExpectNoError(e2epod.VerifyPods(c, ns, name, true, 1))
+		framework.ExpectNoError(e2epod.VerifyPods(ctx, c, ns, name, true, 1))
 	}
 }

@@ -124,9 +124,9 @@ func generateMasterRegexp(prefix string) string {
 }

 // waitForMasters waits until the cluster has the desired number of ready masters in it.
-func waitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
+func waitForMasters(ctx context.Context, masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
-		nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+		nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 		if err != nil {
 			framework.Logf("Failed to list nodes: %v", err)
 			continue
@@ -169,27 +169,27 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 	var additionalNodesZones []string
 	var existingRCs []string

-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gce")
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
+		framework.ExpectNoError(waitForMasters(ctx, framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
 		additionalReplicaZones = make([]string, 0)
 		existingRCs = make([]string, 0)
 	})

-	ginkgo.AfterEach(func() {
+	ginkgo.AfterEach(func(ctx context.Context) {
 		// Clean-up additional worker nodes if the test execution was broken.
 		for _, zone := range additionalNodesZones {
 			removeWorkerNodes(zone)
 		}
-		framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute))
+		framework.ExpectNoError(e2enode.AllNodesReady(ctx, c, 5*time.Minute))

 		// Clean-up additional master replicas if the test execution was broken.
 		for _, zone := range additionalReplicaZones {
 			removeMasterReplica(zone)
 		}
-		framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
+		framework.ExpectNoError(waitForMasters(ctx, framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
 	})

 	type Action int
@@ -201,7 +201,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 		RemoveNodes
 	)

-	step := func(action Action, zone string) {
+	step := func(ctx context.Context, action Action, zone string) {
 		switch action {
 		case None:
 		case AddReplica:
@@ -217,25 +217,25 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 			framework.ExpectNoError(removeWorkerNodes(zone))
 			additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone)
 		}
-		framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
-		framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute))
+		framework.ExpectNoError(waitForMasters(ctx, framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
+		framework.ExpectNoError(e2enode.AllNodesReady(ctx, c, 5*time.Minute))

 		// Verify that API server works correctly with HA master.
 		rcName := "ha-master-" + strconv.Itoa(len(existingRCs))
 		createNewRC(c, ns, rcName)
 		existingRCs = append(existingRCs, rcName)
-		verifyRCs(c, ns, existingRCs)
+		verifyRCs(ctx, c, ns, existingRCs)
 	}

 	ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func(ctx context.Context) {
 		zone := framework.TestContext.CloudConfig.Zone
-		step(None, "")
+		step(ctx, None, "")
 		numAdditionalReplicas := 2
 		for i := 0; i < numAdditionalReplicas; i++ {
-			step(AddReplica, zone)
+			step(ctx, AddReplica, zone)
 		}
 		for i := 0; i < numAdditionalReplicas; i++ {
-			step(RemoveReplica, zone)
+			step(ctx, RemoveReplica, zone)
 		}
 	})

@@ -245,15 +245,15 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 		zones := findZonesForRegion(region)
 		zones = removeZoneFromZones(zones, zone)

-		step(None, "")
+		step(ctx, None, "")
 		// If numAdditionalReplicas is larger then the number of remaining zones in the region,
 		// we create a few masters in the same zone and zone entry is repeated in additionalReplicaZones.
 		numAdditionalReplicas := 2
 		for i := 0; i < numAdditionalReplicas; i++ {
-			step(AddReplica, zones[i%len(zones)])
+			step(ctx, AddReplica, zones[i%len(zones)])
 		}
 		for i := 0; i < numAdditionalReplicas; i++ {
-			step(RemoveReplica, zones[i%len(zones)])
+			step(ctx, RemoveReplica, zones[i%len(zones)])
 		}
 	})

@@ -263,12 +263,12 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 		zones := findZonesForRegion(region)
 		zones = removeZoneFromZones(zones, zone)

-		step(None, "")
+		step(ctx, None, "")
 		numAdditionalReplicas := 2

 		// Add worker nodes.
 		for i := 0; i < numAdditionalReplicas && i < len(zones); i++ {
-			step(AddNodes, zones[i])
+			step(ctx, AddNodes, zones[i])
 		}

 		// Add master repilcas.
@@ -276,17 +276,17 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 		// If numAdditionalReplicas is larger then the number of remaining zones in the region,
 		// we create a few masters in the same zone and zone entry is repeated in additionalReplicaZones.
 		for i := 0; i < numAdditionalReplicas; i++ {
-			step(AddReplica, zones[i%len(zones)])
+			step(ctx, AddReplica, zones[i%len(zones)])
 		}

 		// Remove master repilcas.
 		for i := 0; i < numAdditionalReplicas; i++ {
-			step(RemoveReplica, zones[i%len(zones)])
+			step(ctx, RemoveReplica, zones[i%len(zones)])
 		}

 		// Remove worker nodes.
 		for i := 0; i < numAdditionalReplicas && i < len(zones); i++ {
-			step(RemoveNodes, zones[i])
+			step(ctx, RemoveNodes, zones[i])
 		}
 	})
 })
diff --git a/test/e2e/cloud/gcp/kubelet_security.go b/test/e2e/cloud/gcp/kubelet_security.go
index e88de971dc6..f106686ad1f 100644
--- a/test/e2e/cloud/gcp/kubelet_security.go
+++ b/test/e2e/cloud/gcp/kubelet_security.go
@@ -40,16 +40,16 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 	var node *v1.Node
 	var nodeName string

-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		var err error
-		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
 		nodeName = node.Name
 	})

 	// make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
 	ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func(ctx context.Context) {
-		result, err := e2ekubelet.ProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
+		result, err := e2ekubelet.ProxyRequest(ctx, f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
 		framework.ExpectNoError(err)

 		var statusCode int
@@ -57,7 +57,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 		framework.ExpectNotEqual(statusCode, http.StatusOK)
 	})
 	ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func(ctx context.Context) {
-		result, err := e2ekubelet.ProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
+		result, err := e2ekubelet.ProxyRequest(ctx, f.ClientSet, nodeName, "containers/", 4194)
 		framework.ExpectNoError(err)

 		var statusCode int
diff --git a/test/e2e/cloud/gcp/network/kube_proxy_migration.go b/test/e2e/cloud/gcp/network/kube_proxy_migration.go
index 2e51911b790..5020faccc14 100644
--- a/test/e2e/cloud/gcp/network/kube_proxy_migration.go
+++ b/test/e2e/cloud/gcp/network/kube_proxy_migration.go
@@ -69,7 +69,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 			extraEnvs := kubeProxyDaemonSetExtraEnvs(true)
 			upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, kubeProxyUpgradeTest, extraEnvs, extraEnvs)

-			upgrades.RunUpgradeSuite(upgCtx, upgradeTests, upgradeTestFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
+			upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, upgradeTestFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
 	})

@@ -87,7 +87,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 			extraEnvs := kubeProxyDaemonSetExtraEnvs(false)
 			upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, kubeProxyDowngradeTest, extraEnvs, extraEnvs)

-			upgrades.RunUpgradeSuite(upgCtx, downgradeTests, downgradeTestsFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
+			upgrades.RunUpgradeSuite(ctx, upgCtx, downgradeTests, downgradeTestsFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
 	})
 })
diff --git a/test/e2e/cloud/gcp/node/gpu.go b/test/e2e/cloud/gcp/node/gpu.go
index 4c668f794e5..02c77c98c60 100644
--- a/test/e2e/cloud/gcp/node/gpu.go
+++ b/test/e2e/cloud/gcp/node/gpu.go
@@ -48,7 +48,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 			testSuite.TestCases = append(testSuite.TestCases, gpuUpgradeTest)
 			upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, gpuUpgradeTest, nil)

-			upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
+			upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
 		})
 	})
 	ginkgo.Describe("cluster upgrade", func() {
@@ -61,7 +61,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 			testSuite.TestCases = append(testSuite.TestCases, gpuUpgradeTest)
 			upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, gpuUpgradeTest, nil, nil)

-			upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
+			upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
 	})
 	ginkgo.Describe("cluster downgrade", func() {
@@ -74,7 +74,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 			testSuite.TestCases = append(testSuite.TestCases, gpuDowngradeTest)
 			upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, gpuDowngradeTest, nil, nil)

-			upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
+			upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
 	})
 })
diff --git a/test/e2e/cloud/gcp/node_lease.go b/test/e2e/cloud/gcp/node_lease.go
index 336f8ff21e9..cef9a0f86be 100644
--- a/test/e2e/cloud/gcp/node_lease.go
+++ b/test/e2e/cloud/gcp/node_lease.go
@@ -43,10 +43,10 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 	var ns string
 	var group string

-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{})
+		systemPods, err := e2epod.GetPodsInNamespace(ctx, c, ns, map[string]string{})
 		framework.ExpectNoError(err)
 		systemPodsNo = int32(len(systemPods))
 		if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
@@ -66,7 +66,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		skipped = false
 	})

-	ginkgo.AfterEach(func() {
+	ginkgo.AfterEach(func(ctx context.Context) {
 		if skipped {
 			return
 		}
@@ -91,30 +91,30 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}

-		if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
+		if err := e2enode.WaitForReadyNodes(ctx, c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
 			framework.Failf("Couldn't restore the original cluster size: %v", err)
 		}
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart") - err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) framework.ExpectNoError(err) }) ginkgo.It("node lease should be deleted when corresponding node is deleted", func(ctx context.Context) { leaseClient := c.CoordinationV1().Leases(v1.NamespaceNodeLease) - err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute) + err := e2enode.WaitForReadyNodes(ctx, c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute) framework.ExpectNoError(err) ginkgo.By("verify node lease exists for every nodes") - originalNodes, err := e2enode.GetReadySchedulableNodes(c) + originalNodes, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) framework.ExpectEqual(len(originalNodes.Items), framework.TestContext.CloudConfig.NumNodes) - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { pass := true for _, node := range originalNodes.Items { - if _, err := leaseClient.Get(context.TODO(), node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { + if _, err := leaseClient.Get(ctx, node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err) pass = false } @@ -131,9 +131,9 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { framework.ExpectNoError(err) err = framework.WaitForGroupSize(group, targetNumNodes) framework.ExpectNoError(err) - err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute) + err = e2enode.WaitForReadyNodes(ctx, c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute) framework.ExpectNoError(err) - targetNodes, err := e2enode.GetReadySchedulableNodes(c) + targetNodes, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes)) @@ -150,17 +150,17 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { break } framework.ExpectNotEqual(deletedNodeName, "") - gomega.Eventually(func() error { - if _, err := leaseClient.Get(context.TODO(), deletedNodeName, metav1.GetOptions{}); err == nil { + gomega.Eventually(ctx, func() error { + if _, err := leaseClient.Get(ctx, deletedNodeName, metav1.GetOptions{}); err == nil { return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName) } return nil }, 1*time.Minute, 5*time.Second).Should(gomega.BeNil()) ginkgo.By("verify node leases still exist for remaining nodes") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { for _, node := range targetNodes.Items { - if _, err := leaseClient.Get(context.TODO(), node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { + if _, err := leaseClient.Get(ctx, node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { return err } } diff --git a/test/e2e/cloud/gcp/reboot.go b/test/e2e/cloud/gcp/reboot.go index 152120ab2c9..8541d20ea5a 100644 --- a/test/e2e/cloud/gcp/reboot.go +++ b/test/e2e/cloud/gcp/reboot.go @@ -65,13 +65,13 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) 
}) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() { // Most of the reboot tests just make sure that addon/system pods are running, so dump // events for the kube-system namespace on failures namespaceName := metav1.NamespaceSystem ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName)) - events, err := f.ClientSet.CoreV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + events, err := f.ClientSet.CoreV1().Events(namespaceName).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, e := range events.Items { @@ -97,19 +97,19 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { ginkgo.It("each node by ordering clean reboot and ensure they function upon restart", func(ctx context.Context) { // clean shutdown and restart // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted. - testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &", nil) + testReboot(ctx, f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &", nil) }) ginkgo.It("each node by ordering unclean reboot and ensure they function upon restart", func(ctx context.Context) { // unclean shutdown and restart // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown. - testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil) + testReboot(ctx, f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil) }) ginkgo.It("each node by triggering kernel panic and ensure they function upon restart", func(ctx context.Context) { // kernel panic // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered. - testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil) + testReboot(ctx, f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil) }) ginkgo.It("each node by switching off the network interface and ensure they function upon switch on", func(ctx context.Context) { @@ -130,7 +130,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { "echo Starting systemd-networkd | sudo tee /dev/kmsg; " + "sudo systemctl restart systemd-networkd | sudo tee /dev/kmsg" + "' >/dev/null 2>&1 &" - testReboot(f.ClientSet, cmd, nil) + testReboot(ctx, f.ClientSet, cmd, nil) }) ginkgo.It("each node by dropping all inbound packets for a while and ensure they function afterwards", func(ctx context.Context) { @@ -138,7 +138,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets. // We still accept packages send from localhost to prevent monit from restarting kubelet. 
tmpLogPath := "/tmp/drop-inbound.log" - testReboot(f.ClientSet, dropPacketsScript("INPUT", tmpLogPath), catLogHook(tmpLogPath)) + testReboot(ctx, f.ClientSet, dropPacketsScript("INPUT", tmpLogPath), catLogHook(ctx, tmpLogPath)) }) ginkgo.It("each node by dropping all outbound packets for a while and ensure they function afterwards", func(ctx context.Context) { @@ -146,13 +146,13 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets. // We still accept packages send to localhost to prevent monit from restarting kubelet. tmpLogPath := "/tmp/drop-outbound.log" - testReboot(f.ClientSet, dropPacketsScript("OUTPUT", tmpLogPath), catLogHook(tmpLogPath)) + testReboot(ctx, f.ClientSet, dropPacketsScript("OUTPUT", tmpLogPath), catLogHook(ctx, tmpLogPath)) }) }) -func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) { +func testReboot(ctx context.Context, c clientset.Interface, rebootCmd string, hook terminationHook) { // Get all nodes, and kick off the test on each. - nodelist, err := e2enode.GetReadySchedulableNodes(c) + nodelist, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err, "failed to list nodes") if hook != nil { defer func() { @@ -170,7 +170,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) { defer ginkgo.GinkgoRecover() defer wg.Done() n := nodelist.Items[ix] - result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd) + result[ix] = rebootNode(ctx, c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd) if !result[ix] { failed = true } @@ -191,7 +191,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) { } } -func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*v1.Pod) { +func printStatusAndLogsForNotReadyPods(ctx context.Context, c clientset.Interface, ns string, podNames []string, pods []*v1.Pod) { printFn := func(id, log string, err error, previous bool) { prefix := "Retrieving log for container" if previous { @@ -218,7 +218,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName // Print the log of the containers if pod is not running and ready. for _, container := range p.Status.ContainerStatuses { cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name) - log, err := e2epod.GetPodLogs(c, p.Namespace, p.Name, container.Name) + log, err := e2epod.GetPodLogs(ctx, c, p.Namespace, p.Name, container.Name) printFn(cIdentifer, log, err, false) // Get log from the previous container. if container.RestartCount > 0 { @@ -238,7 +238,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName // // It returns true through result only if all of the steps pass; at the first // failed step, it will return false through result and not run the rest. -func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { +func rebootNode(ctx context.Context, c clientset.Interface, provider, name, rebootCmd string) bool { // Setup ns := metav1.NamespaceSystem ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector("spec.nodeName", name)) @@ -250,14 +250,14 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { // Get the node initially. 
framework.Logf("Getting %s", name) - node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}) if err != nil { framework.Logf("Couldn't get node %s", name) return false } // Node sanity check: ensure it is "ready". - if !e2enode.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) { + if !e2enode.WaitForNodeToBeReady(ctx, c, name, framework.NodeReadyInitialTimeout) { return false } @@ -281,32 +281,32 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { // For each pod, we do a sanity check to ensure it's running / healthy // or succeeded now, as that's what we'll be checking later. - if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) { - printStatusAndLogsForNotReadyPods(c, ns, podNames, pods) + if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, podNames, framework.PodReadyBeforeTimeout) { + printStatusAndLogsForNotReadyPods(ctx, c, ns, podNames, pods) return false } // Reboot the node. - if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil { + if err = e2essh.IssueSSHCommand(ctx, rebootCmd, provider, node); err != nil { framework.Logf("Error while issuing ssh command: %v", err) return false } // Wait for some kind of "not ready" status. - if !e2enode.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) { + if !e2enode.WaitForNodeToBeNotReady(ctx, c, name, rebootNodeNotReadyTimeout) { return false } // Wait for some kind of "ready" status. - if !e2enode.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) { + if !e2enode.WaitForNodeToBeReady(ctx, c, name, rebootNodeReadyAgainTimeout) { return false } // Ensure all of the pods that we found on this node before the reboot are // running / healthy, or succeeded. 
-	if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) {
+	if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, podNames, rebootPodReadyAgainTimeout) {
 		newPods := ps.List()
-		printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods)
+		printStatusAndLogsForNotReadyPods(ctx, c, ns, podNames, newPods)
 		return false
 	}
@@ -316,11 +316,11 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {

 type terminationHook func(provider string, nodes *v1.NodeList)

-func catLogHook(logPath string) terminationHook {
+func catLogHook(ctx context.Context, logPath string) terminationHook {
 	return func(provider string, nodes *v1.NodeList) {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
-			if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
+			if _, err := e2essh.IssueSSHCommandWithResult(ctx, cmd, provider, &n); err != nil {
 				framework.Logf("Error while issuing ssh command: %v", err)
 			}
 		}
diff --git a/test/e2e/cloud/gcp/recreate_node.go b/test/e2e/cloud/gcp/recreate_node.go
index 75f33429f68..87bb287107f 100644
--- a/test/e2e/cloud/gcp/recreate_node.go
+++ b/test/e2e/cloud/gcp/recreate_node.go
@@ -49,12 +49,12 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() {
 	var originalPodNames []string
 	var ps *testutils.PodStore
 	systemNamespace := metav1.NamespaceSystem
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gce", "gke")
 		var err error
-		numNodes, err := e2enode.TotalRegistered(f.ClientSet)
+		numNodes, err := e2enode.TotalRegistered(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
-		originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
+		originalNodes, err = e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
 		framework.ExpectNoError(err)

 		framework.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
@@ -68,18 +68,18 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() {
 			originalPodNames[i] = p.ObjectMeta.Name
 		}

-		if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
+		if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
 			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
 		}
 	})

-	ginkgo.AfterEach(func() {
+	ginkgo.AfterEach(func(ctx context.Context) {
 		if ginkgo.CurrentSpecReport().Failed() {
 			// Make sure that addon/system pods are running, so dump
 			// events for the kube-system namespace on failures
 			ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", systemNamespace))
-			events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(context.TODO(), metav1.ListOptions{})
+			events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(ctx, metav1.ListOptions{})
 			framework.ExpectNoError(err)

 			for _, e := range events.Items {
@@ -92,23 +92,23 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() {
 	})

 	ginkgo.It("recreate nodes and ensure they function upon restart", func(ctx context.Context) {
-		testRecreate(f.ClientSet, ps, systemNamespace, originalNodes, originalPodNames)
+		testRecreate(ctx, f.ClientSet, ps, systemNamespace, originalNodes, originalPodNames)
 	})
 })

 // Recreate all the nodes in the test instance group
-func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) {
+func testRecreate(ctx context.Context, c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) {
 	err := gce.RecreateNodes(c, nodes)
 	if err != nil {
 		framework.Failf("Test failed; failed to start the restart instance group command.")
 	}

-	err = gce.WaitForNodeBootIdsToChange(c, nodes, recreateNodeReadyAgainTimeout)
+	err = gce.WaitForNodeBootIdsToChange(ctx, c, nodes, recreateNodeReadyAgainTimeout)
 	if err != nil {
 		framework.Failf("Test failed; failed to recreate at least one node in %v.", recreateNodeReadyAgainTimeout)
 	}

-	nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
+	nodesAfter, err := e2enode.CheckReady(ctx, c, len(nodes), framework.RestartNodeReadyAgainTimeout)
 	framework.ExpectNoError(err)
 	framework.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))

@@ -119,10 +119,10 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
 	// Make sure the pods from before node recreation are running/completed
 	podCheckStart := time.Now()
-	podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout)
+	podNamesAfter, err := e2epod.WaitForNRestartablePods(ctx, ps, len(podNames), framework.RestartPodReadyAgainTimeout)
 	framework.ExpectNoError(err)
 	remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
-	if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
+	if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, systemNamespace, podNamesAfter, remaining) {
 		framework.Failf("At least one pod wasn't running and ready after the restart.")
 	}
 }
diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go
index b8b1cc2358c..139635b7206 100644
--- a/test/e2e/cloud/gcp/resize_nodes.go
+++ b/test/e2e/cloud/gcp/resize_nodes.go
@@ -34,13 +34,13 @@ import (
 	"github.com/onsi/ginkgo/v2"
 )

-func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
-	rc, err := c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{})
+func resizeRC(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) error {
+	rc, err := c.CoreV1().ReplicationControllers(ns).Get(ctx, name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
 	*(rc.Spec.Replicas) = replicas
-	_, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(context.TODO(), rc, metav1.UpdateOptions{})
+	_, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(ctx, rc, metav1.UpdateOptions{})
 	return err
 }

@@ -52,10 +52,10 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 	var ns string
 	var group string

-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{})
+		systemPods, err := e2epod.GetPodsInNamespace(ctx, c, ns, map[string]string{})
 		framework.ExpectNoError(err)
 		systemPodsNo = int32(len(systemPods))
 		if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
@@ -93,13 +93,13 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}

-			if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
+			if err := e2enode.WaitForReadyNodes(ctx, c, int(originalNodeCount), 10*time.Minute); err != nil {
 				framework.Failf("Couldn't restore the original cluster size: %v", err)
 			}
 			// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 			// the cluster is restored to health.
 			ginkgo.By("waiting for system pods to successfully restart")
-			err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
+			err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
 			framework.ExpectNoError(err)
 		})
 	})
@@ -108,11 +108,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			// Create a replication controller for a service that serves its hostname.
 			// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 			name := "my-hostname-delete-node"
-			numNodes, err := e2enode.TotalRegistered(c)
+			numNodes, err := e2enode.TotalRegistered(ctx, c)
 			framework.ExpectNoError(err)
 			originalNodeCount = int32(numNodes)
 			common.NewRCByName(c, ns, name, originalNodeCount, nil, nil)
-			err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount)
+			err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
 			framework.ExpectNoError(err)

 			targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
@@ -121,7 +121,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			framework.ExpectNoError(err)
 			err = framework.WaitForGroupSize(group, targetNumNodes)
 			framework.ExpectNoError(err)
-			err = e2enode.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
+			err = e2enode.WaitForReadyNodes(ctx, c, int(originalNodeCount-1), 10*time.Minute)
 			framework.ExpectNoError(err)

 			ginkgo.By("waiting 2 minutes for the watch in the podGC to catch up, remove any pods scheduled on " +
@@ -129,7 +129,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			time.Sleep(framework.NewTimeoutContextWithDefaults().PodStartShort)

 			ginkgo.By("verifying whether the pods from the removed node are recreated")
-			err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount)
+			err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
 			framework.ExpectNoError(err)
 		})

@@ -139,11 +139,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 			name := "my-hostname-add-node"
 			common.NewSVCByName(c, ns, name)
-			numNodes, err := e2enode.TotalRegistered(c)
+			numNodes, err := e2enode.TotalRegistered(ctx, c)
 			framework.ExpectNoError(err)
 			originalNodeCount = int32(numNodes)
 			common.NewRCByName(c, ns, name, originalNodeCount, nil, nil)
-			err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount)
+			err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
 			framework.ExpectNoError(err)

 			targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
@@ -152,13 +152,13 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			framework.ExpectNoError(err)
 			err = framework.WaitForGroupSize(group, targetNumNodes)
 			framework.ExpectNoError(err)
-			err = e2enode.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
+			err = e2enode.WaitForReadyNodes(ctx, c, int(originalNodeCount+1), 10*time.Minute)
 			framework.ExpectNoError(err)

 			ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
-			err = resizeRC(c, ns, name, originalNodeCount+1)
+			err = resizeRC(ctx, c, ns, name, originalNodeCount+1)
 			framework.ExpectNoError(err)
-			err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount+1)
+			err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount+1)
 			framework.ExpectNoError(err)
 		})
 	})
diff --git a/test/e2e/cloud/gcp/restart.go b/test/e2e/cloud/gcp/restart.go
index 3d23ebe3635..b52fe8c7886 100644
--- a/test/e2e/cloud/gcp/restart.go
+++ b/test/e2e/cloud/gcp/restart.go
@@ -52,19 +52,19 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 	var numNodes int
 	var systemNamespace string

-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		// This test requires the ability to restart all nodes, so the provider
 		// check must be identical to that call.
 		e2eskipper.SkipUnlessProviderIs("gce", "gke")
 		var err error
 		ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
 		framework.ExpectNoError(err)
-		numNodes, err = e2enode.TotalRegistered(f.ClientSet)
+		numNodes, err = e2enode.TotalRegistered(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
 		systemNamespace = metav1.NamespaceSystem

 		ginkgo.By("ensuring all nodes are ready")
-		originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
+		originalNodes, err = e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
 		framework.ExpectNoError(err)
 		framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))

@@ -76,8 +76,8 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		for i, p := range pods {
 			originalPodNames[i] = p.ObjectMeta.Name
 		}
-		if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
-			printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods)
+		if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
+			printStatusAndLogsForNotReadyPods(ctx, f.ClientSet, systemNamespace, originalPodNames, pods)
 			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
 		}
 	})
@@ -94,7 +94,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		framework.ExpectNoError(err)

 		ginkgo.By("ensuring all nodes are ready after the restart")
-		nodesAfter, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
+		nodesAfter, err := e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
 		framework.ExpectNoError(err)
 		framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))

@@ -111,12 +111,12 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		// across node restarts.
 		ginkgo.By("ensuring the same number of pods are running and ready after restart")
 		podCheckStart := time.Now()
-		podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
+		podNamesAfter, err := e2epod.WaitForNRestartablePods(ctx, ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
 		framework.ExpectNoError(err)
 		remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
-		if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
+		if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, f.ClientSet, systemNamespace, podNamesAfter, remaining) {
 			pods := ps.List()
-			printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods)
+			printStatusAndLogsForNotReadyPods(ctx, f.ClientSet, systemNamespace, podNamesAfter, pods)
 			framework.Failf("At least one pod wasn't running and ready after the restart.")
 		}
 	})
diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go
index 8086c27974a..68da18a6654 100644
--- a/test/e2e/cloud/nodes.go
+++ b/test/e2e/cloud/nodes.go
@@ -47,10 +47,10 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {

 	ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func(ctx context.Context) {
 		ginkgo.By("deleting a node on the cloud provider")
-		nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(c)
+		nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(ctx, c)
 		framework.ExpectNoError(err)

-		origNodes, err := e2enode.GetReadyNodesIncludingTainted(c)
+		origNodes, err := e2enode.GetReadyNodesIncludingTainted(ctx, c)
 		if err != nil {
 			framework.Logf("Unexpected error occurred: %v", err)
 		}
@@ -63,11 +63,11 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 			framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
 		}

-		newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute)
+		newNodes, err := e2enode.CheckReady(ctx, c, len(origNodes.Items)-1, 5*time.Minute)
 		framework.ExpectNoError(err)
 		framework.ExpectEqual(len(newNodes), len(origNodes.Items)-1)

-		_, err = c.CoreV1().Nodes().Get(context.TODO(), nodeToDelete.Name, metav1.GetOptions{})
+		_, err = c.CoreV1().Nodes().Get(ctx, nodeToDelete.Name, metav1.GetOptions{})
 		if err == nil {
 			framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name)
 		} else if !apierrors.IsNotFound(err) {
diff --git a/test/e2e/common/network/networking.go b/test/e2e/common/network/networking.go
index 452707edc78..55d84647115 100644
--- a/test/e2e/common/network/networking.go
+++ b/test/e2e/common/network/networking.go
@@ -33,13 +33,13 @@ var _ = SIGDescribe("Networking", func() {

 	ginkgo.Describe("Granular Checks: Pods", func() {

-		checkPodToPodConnectivity := func(config *e2enetwork.NetworkingTestConfig, protocol string, port int) {
+		checkPodToPodConnectivity := func(ctx context.Context, config *e2enetwork.NetworkingTestConfig, protocol string, port int) {
 			// breadth first poll to quickly estimate failure.
 			failedPodsByHost := map[string][]*v1.Pod{}
 			// First time, we'll quickly try all pods, breadth first.
 			for _, endpointPod := range config.EndpointPods {
 				framework.Logf("Breadth first check of %v on host %v...", endpointPod.Status.PodIP, endpointPod.Status.HostIP)
-				if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, 1, 0, sets.NewString(endpointPod.Name)); err != nil {
+				if err := config.DialFromTestContainer(ctx, protocol, endpointPod.Status.PodIP, port, 1, 0, sets.NewString(endpointPod.Name)); err != nil {
 					if _, ok := failedPodsByHost[endpointPod.Status.HostIP]; !ok {
 						failedPodsByHost[endpointPod.Status.HostIP] = []*v1.Pod{}
 					}
@@ -54,7 +54,7 @@ var _ = SIGDescribe("Networking", func() {
 				framework.Logf("Doublechecking %v pods in host %v which weren't seen the first time.", len(failedPods), host)
 				for _, endpointPod := range failedPods {
 					framework.Logf("Now attempting to probe pod [[[ %v ]]]", endpointPod.Status.PodIP)
-					if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil {
+					if err := config.DialFromTestContainer(ctx, protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil {
 						errors = append(errors, err)
 					} else {
 						framework.Logf("Was able to reach %v on %v ", endpointPod.Status.PodIP, endpointPod.Status.HostIP)
@@ -82,8 +82,8 @@ var _ = SIGDescribe("Networking", func() {
 			The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
 		*/
 		framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func(ctx context.Context) {
-			config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
-			checkPodToPodConnectivity(config, "http", e2enetwork.EndpointHTTPPort)
+			config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, false)
+			checkPodToPodConnectivity(ctx, config, "http", e2enetwork.EndpointHTTPPort)
 		})

 		/*
@@ -93,8 +93,8 @@ var _ = SIGDescribe("Networking", func() {
 			The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
 		*/
 		framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func(ctx context.Context) {
-			config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
-			checkPodToPodConnectivity(config, "udp", e2enetwork.EndpointUDPPort)
+			config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, false)
+			checkPodToPodConnectivity(ctx, config, "udp", e2enetwork.EndpointUDPPort)
 		})

 		/*
@@ -105,9 +105,9 @@ var _ = SIGDescribe("Networking", func() {
 			This test is marked LinuxOnly it breaks when using Overlay networking with Windows.
 		*/
 		framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
-			config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
+			config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, true)
 			for _, endpointPod := range config.EndpointPods {
-				err := config.DialFromNode("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+				err := config.DialFromNode(ctx, "http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 				if err != nil {
 					framework.Failf("Error dialing HTTP node to pod %v", err)
 				}
@@ -122,9 +122,9 @@ var _ = SIGDescribe("Networking", func() {
 			This test is marked LinuxOnly it breaks when using Overlay networking with Windows.
 		*/
 		framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
-			config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
+			config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, true)
 			for _, endpointPod := range config.EndpointPods {
-				err := config.DialFromNode("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+				err := config.DialFromNode(ctx, "udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 				if err != nil {
 					framework.Failf("Error dialing UDP from node to pod: %v", err)
 				}
@@ -132,15 +132,15 @@ var _ = SIGDescribe("Networking", func() {
 		})

 		ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) {
-			config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
-			checkPodToPodConnectivity(config, "sctp", e2enetwork.EndpointSCTPPort)
+			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
+			checkPodToPodConnectivity(ctx, config, "sctp", e2enetwork.EndpointSCTPPort)
 		})

 		ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) {
 			ginkgo.Skip("Skipping SCTP node to pod test until DialFromNode supports SCTP #96482")
-			config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
+			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
 			for _, endpointPod := range config.EndpointPods {
-				err := config.DialFromNode("sctp", endpointPod.Status.PodIP, e2enetwork.EndpointSCTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+				err := config.DialFromNode(ctx, "sctp", endpointPod.Status.PodIP, e2enetwork.EndpointSCTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 				if err != nil {
 					framework.Failf("Error dialing SCTP from node to pod: %v", err)
 				}
diff --git a/test/e2e/common/node/configmap.go b/test/e2e/common/node/configmap.go
index 868e762a5f0..d715a2d1de2 100644
--- a/test/e2e/common/node/configmap.go
+++ b/test/e2e/common/node/configmap.go
@@ -47,7 +47,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 		configMap := newConfigMap(f, name)
 		ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
 		var err error
-		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
+		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
 			framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 		}

@@ -80,7 +80,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 			},
 		}

-		e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
+		e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
 			"CONFIG_DATA_1=value-1",
 		})
 	})
@@ -95,7 +95,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 		configMap := newConfigMap(f, name)
 		ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
 		var err error
-		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
+		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
 			framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 		}

@@ -124,7 +124,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 			},
 		}

-		e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
+		e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
 			"data-1=value-1", "data-2=value-2", "data-3=value-3",
 			"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
 		})
@@ -136,7 +136,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 	   Description: Attempt to create a ConfigMap with an empty key. The creation MUST fail.
 	*/
 	framework.ConformanceIt("should fail to create ConfigMap with empty key", func(ctx context.Context) {
-		configMap, err := newConfigMapWithEmptyKey(f)
+		configMap, err := newConfigMapWithEmptyKey(ctx, f)
 		framework.ExpectError(err, "created configMap %q with empty key in namespace %q", configMap.Name, f.Namespace.Name)
 	})

@@ -144,17 +144,17 @@ var _ = SIGDescribe("ConfigMap", func() {
 		name := "configmap-test-" + string(uuid.NewUUID())
 		configMap := newConfigMap(f, name)
 		ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
-		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
+		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create ConfigMap")

 		configMap.Data = map[string]string{
 			"data": "value",
 		}
 		ginkgo.By(fmt.Sprintf("Updating configMap %v/%v", f.Namespace.Name, configMap.Name))
-		_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{})
+		_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{})
 		framework.ExpectNoError(err, "failed to update ConfigMap")

-		configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
+		configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "failed to get ConfigMap")
 		ginkgo.By(fmt.Sprintf("Verifying update of ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
 		framework.ExpectEqual(configMapFromUpdate.Data, configMap.Data)
@@ -183,11 +183,11 @@ var _ = SIGDescribe("ConfigMap", func() {
 		}

 		ginkgo.By("creating a ConfigMap")
-		_, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(context.TODO(), &testConfigMap, metav1.CreateOptions{})
+		_, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(ctx, &testConfigMap, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create ConfigMap")

 		ginkgo.By("fetching the ConfigMap")
-		configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(context.TODO(), testConfigMapName, metav1.GetOptions{})
+		configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(ctx, testConfigMapName, metav1.GetOptions{})
 		framework.ExpectNoError(err, "failed to get ConfigMap")
 		framework.ExpectEqual(configMap.Data["valueName"], testConfigMap.Data["valueName"])
 		framework.ExpectEqual(configMap.Labels["test-configmap-static"], testConfigMap.Labels["test-configmap-static"])
@@ -205,11 +205,11 @@ var _ = SIGDescribe("ConfigMap", func() {
 		framework.ExpectNoError(err, "failed to marshal patch data")

 		ginkgo.By("patching the ConfigMap")
-		_, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(context.TODO(), testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload), metav1.PatchOptions{})
+		_, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(ctx, testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload), metav1.PatchOptions{})
 		framework.ExpectNoError(err, "failed to patch ConfigMap")

 		ginkgo.By("listing all ConfigMaps in all namespaces with a label selector")
-		configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(context.TODO(), metav1.ListOptions{
+		configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(ctx, metav1.ListOptions{
 			LabelSelector: "test-configmap=patched",
 		})
 		framework.ExpectNoError(err, "failed to list ConfigMaps with LabelSelector")
@@ -229,13 +229,13 @@ var _ = SIGDescribe("ConfigMap", func() {
 		}

 		ginkgo.By("deleting the ConfigMap by collection with a label selector")
-		err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
+		err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
 			LabelSelector: "test-configmap-static=true",
 		})
 		framework.ExpectNoError(err, "failed to delete ConfigMap collection with LabelSelector")

 		ginkgo.By("listing all ConfigMaps in test namespace")
-		configMapList, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).List(context.TODO(), metav1.ListOptions{
+		configMapList, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).List(ctx, metav1.ListOptions{
 			LabelSelector: "test-configmap-static=true",
 		})
 		framework.ExpectNoError(err, "failed to list ConfigMap by LabelSelector")
@@ -257,7 +257,7 @@ func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
 	}
 }

-func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
+func newConfigMapWithEmptyKey(ctx context.Context, f *framework.Framework) (*v1.ConfigMap, error) {
 	name := "configmap-test-emptyKey-" + string(uuid.NewUUID())
 	configMap := &v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
@@ -270,5 +270,5 @@ func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
 	}

 	ginkgo.By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name))
-	return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
+	return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
 }
diff --git a/test/e2e/common/node/container.go b/test/e2e/common/node/container.go
index df0ac44d22c..8af9b9ea655 100644
--- a/test/e2e/common/node/container.go
+++ b/test/e2e/common/node/container.go
@@ -50,7 +50,7 @@ type ConformanceContainer struct {
 }

 // Create creates the defined conformance container
-func (cc *ConformanceContainer) Create() {
+func (cc *ConformanceContainer) Create(ctx context.Context) {
 	cc.podName = cc.Container.Name + string(uuid.NewUUID())
 	imagePullSecrets := []v1.LocalObjectReference{}
 	for _, s := range cc.ImagePullSecrets {
@@ -70,17 +70,17 @@ func (cc *ConformanceContainer) Create() {
 			ImagePullSecrets: imagePullSecrets,
 		},
 	}
-	cc.PodClient.Create(pod)
+	cc.PodClient.Create(ctx, pod)
 }

 // Delete deletes the defined conformance container
-func (cc *ConformanceContainer) Delete() error {
-	return cc.PodClient.Delete(context.TODO(), cc.podName, *metav1.NewDeleteOptions(0))
+func (cc *ConformanceContainer) Delete(ctx context.Context) error {
+	return cc.PodClient.Delete(ctx, cc.podName, *metav1.NewDeleteOptions(0))
 }

 // IsReady returns whether this container is ready and error if any
-func (cc *ConformanceContainer) IsReady() (bool, error) {
-	pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
+func (cc *ConformanceContainer) IsReady(ctx context.Context) (bool, error) {
+	pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
 	if err != nil {
 		return false, err
 	}
@@ -88,8 +88,8 @@ func (cc *ConformanceContainer) IsReady() (bool, error) {
 }

 // GetPhase returns the phase of the pod lifecycle and error if any
-func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
-	pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
+func (cc *ConformanceContainer) GetPhase(ctx context.Context) (v1.PodPhase, error) {
+	pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
 	if err != nil {
 		// it doesn't matter what phase to return as error would not be nil
 		return v1.PodSucceeded, err
@@ -98,8 +98,8 @@ func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
 }

 // GetStatus returns the details of the current status of this container and error if any
-func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
-	pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
+func (cc *ConformanceContainer) GetStatus(ctx context.Context) (v1.ContainerStatus, error) {
+	pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
 	if err != nil {
 		return v1.ContainerStatus{}, err
 	}
@@ -111,8 +111,8 @@ func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
 }

 // Present returns whether this pod is present and error if any
-func (cc *ConformanceContainer) Present() (bool, error) {
-	_, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
+func (cc *ConformanceContainer) Present(ctx context.Context) (bool, error) {
+	_, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
 	if err == nil {
 		return true, nil
 	}
diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go
index dd77b92d745..5f4e90d3e71 100644
--- a/test/e2e/common/node/container_probe.go
+++ b/test/e2e/common/node/container_probe.go
@@ -71,10 +71,10 @@ var _ = SIGDescribe("Probing container", func() {
 	*/
 	framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func(ctx context.Context) {
 		containerName := "test-webserver"
-		p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80))
-		e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)
+		p := podClient.Create(ctx, testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80))
+		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout))

-		p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
+		p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		isReady, err := testutils.PodRunningReady(p)
 		framework.ExpectNoError(err)
@@ -106,16 +106,16 @@ var _ = SIGDescribe("Probing container", func() {
 	   then the Pod MUST never be ready, never be running and restart count MUST be zero.
 	*/
 	framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func(ctx context.Context) {
-		p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
-		gomega.Consistently(func() (bool, error) {
-			p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
+		p := podClient.Create(ctx, testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
+		gomega.Consistently(ctx, func() (bool, error) {
+			p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
 			}
 			return podutil.IsPodReady(p), nil
 		}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready")

-		p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
+		p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)

 		isReady, _ := testutils.PodRunningReady(p)
@@ -141,7 +141,7 @@ var _ = SIGDescribe("Probing container", func() {
 			FailureThreshold: 1,
 		}
 		pod := busyBoxPodSpec(nil, livenessProbe, cmd)
-		RunLivenessTest(f, pod, 1, defaultObservationTimeout)
+		RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
 	})

 	/*
@@ -158,7 +158,7 @@ var _ = SIGDescribe("Probing container", func() {
 			FailureThreshold: 1,
 		}
 		pod := busyBoxPodSpec(nil, livenessProbe, cmd)
-		RunLivenessTest(f, pod, 0, defaultObservationTimeout)
+		RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
 	})

 	/*
@@ -173,7 +173,7 @@ var _ = SIGDescribe("Probing container", func() {
 			FailureThreshold: 1,
 		}
 		pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
-		RunLivenessTest(f, pod, 1, defaultObservationTimeout)
+		RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
 	})

 	/*
@@ -188,7 +188,7 @@ var _ = SIGDescribe("Probing container", func() {
 			FailureThreshold: 1,
 		}
 		pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
-		RunLivenessTest(f, pod, 0, defaultObservationTimeout)
+		RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
 	})

 	/*
@@ -204,7 +204,7 @@ var _ = SIGDescribe("Probing container", func() {
 		}
 		pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
 		// ~2 minutes backoff timeouts + 4 minutes defaultObservationTimeout + 2 minutes for each pod restart
-		RunLivenessTest(f, pod, 5, 2*time.Minute+defaultObservationTimeout+4*2*time.Minute)
+		RunLivenessTest(ctx, f, pod, 5, 2*time.Minute+defaultObservationTimeout+4*2*time.Minute)
 	})

 	/*
@@ -220,7 +220,7 @@ var _ = SIGDescribe("Probing container", func() {
 			FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers.
} pod := testWebServerPodSpec(nil, livenessProbe, "test-webserver", 80) - RunLivenessTest(f, pod, 0, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout) }) /* @@ -237,7 +237,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 1, } pod := busyBoxPodSpec(nil, livenessProbe, cmd) - RunLivenessTest(f, pod, 1, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout) }) /* @@ -254,7 +254,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 1, } pod := busyBoxPodSpec(readinessProbe, nil, cmd) - runReadinessFailTest(f, pod, time.Minute) + runReadinessFailTest(ctx, f, pod, time.Minute) }) /* @@ -271,7 +271,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 1, } pod := busyBoxPodSpec(nil, livenessProbe, cmd) - RunLivenessTest(f, pod, 1, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout) }) /* @@ -286,7 +286,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 1, } pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe) - RunLivenessTest(f, pod, 1, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout) }) /* @@ -301,7 +301,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 1, } pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe) - RunLivenessTest(f, pod, 0, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout) // Expect an event of type "ProbeWarning". expectedEvent := fields.Set{ "involvedObject.kind": "Pod", @@ -310,7 +310,7 @@ var _ = SIGDescribe("Probing container", func() { "reason": events.ContainerProbeWarning, }.AsSelector().String() framework.ExpectNoError(e2eevents.WaitTimeoutForEvent( - f.ClientSet, f.Namespace.Name, expectedEvent, "Probe terminated redirects, Response body: Found.", framework.PodEventTimeout)) + ctx, f.ClientSet, f.Namespace.Name, expectedEvent, "Probe terminated redirects, Response body: Found.", framework.PodEventTimeout)) }) /* @@ -339,7 +339,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 3, } pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd) - RunLivenessTest(f, pod, 1, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout) }) /* @@ -368,7 +368,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 60, } pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd) - RunLivenessTest(f, pod, 0, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout) }) /* @@ -397,7 +397,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 60, } pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd) - RunLivenessTest(f, pod, 1, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout) }) /* @@ -421,22 +421,22 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 120, PeriodSeconds: 5, } - p := podClient.Create(startupPodSpec(startupProbe, readinessProbe, nil, cmd)) + p := podClient.Create(ctx, startupPodSpec(startupProbe, readinessProbe, nil, cmd)) - p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) + p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitForPodContainerStarted(f.ClientSet, f.Namespace.Name, p.Name, 0, framework.PodStartTimeout) + err = e2epod.WaitForPodContainerStarted(ctx, f.ClientSet, f.Namespace.Name, p.Name, 0, 
framework.PodStartTimeout) framework.ExpectNoError(err) startedTime := time.Now() // We assume the pod became ready when the container became ready. This // is true for a single container pod. - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err) readyTime := time.Now() - p, err = podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) + p, err = podClient.Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) isReady, err := testutils.PodRunningReady(p) @@ -480,7 +480,7 @@ var _ = SIGDescribe("Probing container", func() { } // 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500 - RunLivenessTest(f, pod, 1, time.Second*30) + RunLivenessTest(ctx, f, pod, 1, time.Second*30) }) /* @@ -513,7 +513,7 @@ var _ = SIGDescribe("Probing container", func() { } // 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500 - RunLivenessTest(f, pod, 1, time.Second*30) + RunLivenessTest(ctx, f, pod, 1, time.Second*30) }) /* @@ -535,7 +535,7 @@ var _ = SIGDescribe("Probing container", func() { } pod := gRPCServerPodSpec(nil, livenessProbe, "etcd") - RunLivenessTest(f, pod, 0, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout) }) /* @@ -556,7 +556,7 @@ var _ = SIGDescribe("Probing container", func() { FailureThreshold: 1, } pod := gRPCServerPodSpec(nil, livenessProbe, "etcd") - RunLivenessTest(f, pod, 1, defaultObservationTimeout) + RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout) }) ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func(ctx context.Context) { @@ -580,7 +580,7 @@ done ` // Create Pod - podClient.Create(&v1.Pod{ + podClient.Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -608,12 +608,14 @@ done }) // verify pods are running and ready - err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{}) framework.ExpectNoError(err) // Shutdown pod. Readiness should change to false - podClient.Delete(context.Background(), podName, metav1.DeleteOptions{}) - err = waitForPodStatusByInformer(f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) { + err = podClient.Delete(ctx, podName, metav1.DeleteOptions{}) + framework.ExpectNoError(err) + + err = waitForPodStatusByInformer(ctx, f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) { if !podutil.IsPodReady(pod) { return true, nil } @@ -646,7 +648,7 @@ done ` // Create Pod - podClient.Create(&v1.Pod{ + podClient.Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -688,14 +690,15 @@ done }) // verify pods are running and ready - err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{}) framework.ExpectNoError(err) // Shutdown pod. 
Readiness should change to false - podClient.Delete(context.Background(), podName, metav1.DeleteOptions{}) + err = podClient.Delete(ctx, podName, metav1.DeleteOptions{}) + framework.ExpectNoError(err) // Wait for pod to go unready - err = waitForPodStatusByInformer(f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) { + err = waitForPodStatusByInformer(ctx, f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) { if !podutil.IsPodReady(pod) { return true, nil } @@ -706,8 +709,8 @@ done // Verify there are zero liveness failures since they are turned off // during pod termination - gomega.Consistently(func() (bool, error) { - items, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.Background(), metav1.ListOptions{}) + gomega.Consistently(ctx, func(ctx context.Context) (bool, error) { + items, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, event := range items.Items { // Search only for the pod we are interested in @@ -724,37 +727,41 @@ done }) // waitForPodStatusByInformer waits pod status change by informer -func waitForPodStatusByInformer(c clientset.Interface, podNamespace, podName string, timeout time.Duration, condition func(pod *v1.Pod) (bool, error)) error { +func waitForPodStatusByInformer(ctx context.Context, c clientset.Interface, podNamespace, podName string, timeout time.Duration, condition func(pod *v1.Pod) (bool, error)) error { + // TODO (pohly): rewrite with gomega.Eventually to get intermediate progress reports. stopCh := make(chan struct{}) checkPodStatusFunc := func(pod *v1.Pod) { if ok, _ := condition(pod); ok { close(stopCh) } } - controller := newInformerWatchPod(c, podNamespace, podName, checkPodStatusFunc) + controller := newInformerWatchPod(ctx, c, podNamespace, podName, checkPodStatusFunc) go controller.Run(stopCh) after := time.After(timeout) select { case <-stopCh: return nil + case <-ctx.Done(): + close(stopCh) + return fmt.Errorf("timeout to wait pod status ready") case <-after: - defer close(stopCh) + close(stopCh) return fmt.Errorf("timeout to wait pod status ready") } } // newInformerWatchPod creates a informer for given pod -func newInformerWatchPod(c clientset.Interface, podNamespace, podName string, checkPodStatusFunc func(p *v1.Pod)) cache.Controller { +func newInformerWatchPod(ctx context.Context, c clientset.Interface, podNamespace, podName string, checkPodStatusFunc func(p *v1.Pod)) cache.Controller { _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String() - obj, err := c.CoreV1().Pods(podNamespace).List(context.TODO(), options) + obj, err := c.CoreV1().Pods(podNamespace).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String() - return c.CoreV1().Pods(podNamespace).Watch(context.TODO(), options) + return c.CoreV1().Pods(podNamespace).Watch(ctx, options) }, }, &v1.Pod{}, @@ -936,7 +943,7 @@ func (b webserverProbeBuilder) build() *v1.Probe { } // RunLivenessTest verifies the number of restarts for pod with given expected number of restarts -func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) { +func 
RunLivenessTest(ctx context.Context, f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) { podClient := e2epod.NewPodClient(f) ns := f.Namespace.Name gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) @@ -947,18 +954,18 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) }) ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns)) - podClient.Create(pod) + podClient.Create(ctx, pod) // Wait until the pod is not pending. (Here we need to check for something other than // 'Pending' other than checking for 'Running', since when failures occur, we go to // 'Terminated' which can cause indefinite blocking.) - framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name), + framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, pod.Name), fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns)) framework.Logf("Started pod %s in namespace %s", pod.Name, ns) // Check the pod's current state and verify that restartCount is present. ginkgo.By("checking the pod's current state and verifying that restartCount is present") - pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns)) initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount) @@ -968,7 +975,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, lastRestartCount := initialRestartCount observedRestarts := int32(0) for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) { - pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name)) restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount if restartCount != lastRestartCount { @@ -996,7 +1003,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, } } -func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil time.Duration) { +func runReadinessFailTest(ctx context.Context, f *framework.Framework, pod *v1.Pod, notReadyUntil time.Duration) { podClient := e2epod.NewPodClient(f) ns := f.Namespace.Name gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) @@ -1007,11 +1014,11 @@ func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil tim return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) }) ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns)) - podClient.Create(pod) + podClient.Create(ctx, pod) // Wait until the pod is not pending. (Here we need to check for something other than // 'Pending', since when failures occur, we go to 'Terminated' which can cause indefinite blocking.) 
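RunLivenessTest and runReadinessFailTest above show the conversion pattern most helpers in this patch follow: accept a context.Context as the first parameter and forward it to every client-go call and wait helper instead of minting a context.TODO(). A stripped-down sketch of that shape, using plain client-go and a hypothetical runPodAndWaitRunning helper rather than the e2e framework types:

```go
package example

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// runPodAndWaitRunning is a hypothetical helper in the post-patch style: the
// caller's ctx comes in as the first parameter and is forwarded to every API
// call and poll, so aborting the test cancels the helper as well.
func runPodAndWaitRunning(ctx context.Context, c kubernetes.Interface, pod *v1.Pod, timeout time.Duration) error {
	created, err := c.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("creating pod %s: %w", pod.Name, err)
	}

	// Poll with the caller's ctx instead of context.TODO(); the poll stops
	// early if ctx is cancelled.
	return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		p, err := c.CoreV1().Pods(created.Namespace).Get(ctx, created.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return p.Status.Phase == v1.PodRunning, nil
	})
}
```

A caller inside a spec then simply passes the ctx it receives from Ginkgo, e.g. framework.ExpectNoError(runPodAndWaitRunning(ctx, f.ClientSet, pod, framework.PodStartTimeout)).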
- framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name), + framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, pod.Name), fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns)) framework.Logf("Started pod %s in namespace %s", pod.Name, ns) diff --git a/test/e2e/common/node/containers.go b/test/e2e/common/node/containers.go index 6b3370ef2a9..950253346bb 100644 --- a/test/e2e/common/node/containers.go +++ b/test/e2e/common/node/containers.go @@ -41,16 +41,16 @@ var _ = SIGDescribe("Containers", func() { framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name) pod.Spec.Containers[0].Args = nil - pod = e2epod.NewPodClient(f).Create(pod) - err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + pod = e2epod.NewPodClient(f).Create(ctx, pod) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "Expected pod %q to be running, got error: %v", pod.Name, err) pollLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) } // The agnhost's image default entrypoint / args are: "/agnhost pause" // which will print out "Paused". - gomega.Eventually(pollLogs, 3, framework.Poll).Should(gomega.ContainSubstring("Paused")) + gomega.Eventually(ctx, pollLogs, 3, framework.Poll).Should(gomega.ContainSubstring("Paused")) }) /* @@ -60,7 +60,7 @@ var _ = SIGDescribe("Containers", func() { */ framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments") - e2epodoutput.TestContainerOutput(f, "override arguments", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "override arguments", pod, 0, []string{ "[/agnhost entrypoint-tester override arguments]", }) }) @@ -76,7 +76,7 @@ var _ = SIGDescribe("Containers", func() { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester") pod.Spec.Containers[0].Command = []string{"/agnhost-2"} - e2epodoutput.TestContainerOutput(f, "override command", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "override command", pod, 0, []string{ "[/agnhost-2 entrypoint-tester]", }) }) @@ -90,7 +90,7 @@ var _ = SIGDescribe("Containers", func() { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments") pod.Spec.Containers[0].Command = []string{"/agnhost-2"} - e2epodoutput.TestContainerOutput(f, "override all", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "override all", pod, 0, []string{ "[/agnhost-2 entrypoint-tester override arguments]", }) }) diff --git a/test/e2e/common/node/downwardapi.go b/test/e2e/common/node/downwardapi.go index c43c8c9a0e3..c1fbcf57d4f 100644 --- a/test/e2e/common/node/downwardapi.go +++ b/test/e2e/common/node/downwardapi.go @@ -80,7 +80,7 @@ var _ = SIGDescribe("Downward API", func() { fmt.Sprintf("POD_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6), } - testDownwardAPI(f, podName, env, expectations) + testDownwardAPI(ctx, f, podName, env, expectations) }) /* @@ -106,7 +106,7 @@ var _ = SIGDescribe("Downward API", func() { fmt.Sprintf("HOST_IP=%v|%v", 
e2enetwork.RegexIPv4, e2enetwork.RegexIPv6), } - testDownwardAPI(f, podName, env, expectations) + testDownwardAPI(ctx, f, podName, env, expectations) }) ginkgo.It("should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]", func(ctx context.Context) { @@ -155,7 +155,7 @@ var _ = SIGDescribe("Downward API", func() { }, } - testDownwardAPIUsingPod(f, pod, env, expectations) + testDownwardAPIUsingPod(ctx, f, pod, env, expectations) }) @@ -207,7 +207,7 @@ var _ = SIGDescribe("Downward API", func() { "MEMORY_REQUEST=33554432", } - testDownwardAPI(f, podName, env, expectations) + testDownwardAPI(ctx, f, podName, env, expectations) }) /* @@ -257,7 +257,7 @@ var _ = SIGDescribe("Downward API", func() { }, } - testDownwardAPIUsingPod(f, pod, env, expectations) + testDownwardAPIUsingPod(ctx, f, pod, env, expectations) }) /* @@ -283,7 +283,7 @@ var _ = SIGDescribe("Downward API", func() { "POD_UID=[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}", } - testDownwardAPI(f, podName, env, expectations) + testDownwardAPI(ctx, f, podName, env, expectations) }) }) @@ -344,7 +344,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI RestartPolicy: v1.RestartPolicyNever, }, } - testDownwardAPIUsingPod(f, pod, env, expectations) + testDownwardAPIUsingPod(ctx, f, pod, env, expectations) }) ginkgo.It("should provide default limits.hugepages- from node allocatable", func(ctx context.Context) { @@ -381,13 +381,13 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI }, } - testDownwardAPIUsingPod(f, pod, env, expectations) + testDownwardAPIUsingPod(ctx, f, pod, env, expectations) }) }) }) -func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) { +func testDownwardAPI(ctx context.Context, f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -416,9 +416,9 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex }, } - testDownwardAPIUsingPod(f, pod, env, expectations) + testDownwardAPIUsingPod(ctx, f, pod, env, expectations) } -func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) { - e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations) +func testDownwardAPIUsingPod(ctx context.Context, f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) { + e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward api env vars", pod, 0, expectations) } diff --git a/test/e2e/common/node/ephemeral_containers.go b/test/e2e/common/node/ephemeral_containers.go index 09c3edf2f2a..34f0f3cfc3c 100644 --- a/test/e2e/common/node/ephemeral_containers.go +++ b/test/e2e/common/node/ephemeral_containers.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() { // Description: Adding an ephemeral container to pod.spec MUST result in the container running. 
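The waitForPodStatusByInformer change earlier in container_probe.go adds a <-ctx.Done() case next to the existing timeout, so a hand-rolled waiter also returns as soon as the test's context is cancelled. The general shape of that select, reduced to a self-contained sketch (waitWithContext and the startWatch callback are stand-ins for the informer and condition wiring of the real helper, not framework code):

```go
package example

import (
	"context"
	"fmt"
	"time"
)

// waitWithContext sketches a hand-rolled waiter that honours both a local
// timeout and cancellation of the test context. startWatch is expected to
// close done once the awaited condition holds and to return when stop closes.
func waitWithContext(ctx context.Context, timeout time.Duration, startWatch func(stop <-chan struct{}, done chan<- struct{})) error {
	stopCh := make(chan struct{})
	doneCh := make(chan struct{})
	go startWatch(stopCh, doneCh)

	select {
	case <-doneCh:
		close(stopCh)
		return nil
	case <-ctx.Done():
		// Test aborted or timed out at the Ginkgo level: stop the watch and
		// return immediately instead of sitting out the local timeout.
		close(stopCh)
		return fmt.Errorf("context cancelled while waiting: %w", ctx.Err())
	case <-time.After(timeout):
		close(stopCh)
		return fmt.Errorf("timed out after %v while waiting", timeout)
	}
}
```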
framework.ConformanceIt("will start an ephemeral container in an existing pod", func(ctx context.Context) { ginkgo.By("creating a target pod") - pod := podClient.CreateSync(&v1.Pod{ + pod := podClient.CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-containers-target-pod"}, Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -70,14 +70,14 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() { TTY: true, }, } - err := podClient.AddEphemeralContainerSync(pod, ec, time.Minute) + err := podClient.AddEphemeralContainerSync(ctx, pod, ec, time.Minute) framework.ExpectNoError(err, "Failed to patch ephemeral containers in pod %q", format.Pod(pod)) ginkgo.By("checking pod container endpoints") // Can't use anything depending on kubectl here because it's not available in the node test environment output := e2epod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco") gomega.Expect(output).To(gomega.ContainSubstring("marco")) - log, err := e2epod.GetPodLogs(f.ClientSet, pod.Namespace, pod.Name, ecName) + log, err := e2epod.GetPodLogs(ctx, f.ClientSet, pod.Namespace, pod.Name, ecName) framework.ExpectNoError(err, "Failed to get logs for pod %q ephemeral container %q", format.Pod(pod), ecName) gomega.Expect(log).To(gomega.ContainSubstring("polo")) }) diff --git a/test/e2e/common/node/expansion.go b/test/e2e/common/node/expansion.go index 5bf43d6a298..1b0b13bf287 100644 --- a/test/e2e/common/node/expansion.go +++ b/test/e2e/common/node/expansion.go @@ -60,7 +60,7 @@ var _ = SIGDescribe("Variable Expansion", func() { } pod := newPod([]string{"sh", "-c", "env"}, envVars, nil, nil) - e2epodoutput.TestContainerOutput(f, "env composition", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "env composition", pod, 0, []string{ "FOO=foo-value", "BAR=bar-value", "FOOBAR=foo-value;;bar-value", @@ -81,7 +81,7 @@ var _ = SIGDescribe("Variable Expansion", func() { } pod := newPod([]string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""}, envVars, nil, nil) - e2epodoutput.TestContainerOutput(f, "substitution in container's command", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "substitution in container's command", pod, 0, []string{ "test-value", }) }) @@ -101,7 +101,7 @@ var _ = SIGDescribe("Variable Expansion", func() { pod := newPod([]string{"sh", "-c"}, envVars, nil, nil) pod.Spec.Containers[0].Args = []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""} - e2epodoutput.TestContainerOutput(f, "substitution in container's args", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "substitution in container's args", pod, 0, []string{ "test-value", }) }) @@ -141,7 +141,7 @@ var _ = SIGDescribe("Variable Expansion", func() { envVars[0].Value = pod.ObjectMeta.Name pod.Spec.Containers[0].Command = []string{"sh", "-c", "test -d /testcontainer/" + pod.ObjectMeta.Name + ";echo $?"} - e2epodoutput.TestContainerOutput(f, "substitution in volume subpath", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "substitution in volume subpath", pod, 0, []string{ "0", }) }) @@ -177,7 +177,7 @@ var _ = SIGDescribe("Variable Expansion", func() { pod := newPod(nil, envVars, mounts, volumes) // Pod should fail - testPodFailSubpath(f, pod) + testPodFailSubpath(ctx, f, pod) }) /* @@ -216,7 +216,7 @@ var _ = SIGDescribe("Variable Expansion", func() { pod := newPod(nil, envVars, mounts, volumes) // Pod should fail - testPodFailSubpath(f, pod) + testPodFailSubpath(ctx, f, pod) }) /* @@ -265,13 +265,13 @@ var _ = SIGDescribe("Variable Expansion", func() 
{ ginkgo.By("creating the pod with failed condition") podClient := e2epod.NewPodClient(f) - pod = podClient.Create(pod) + pod = podClient.Create(ctx, pod) - err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectError(err, "while waiting for pod to be running") ginkgo.By("updating the pod") - podClient.Update(pod.ObjectMeta.Name, func(pod *v1.Pod) { + podClient.Update(ctx, pod.ObjectMeta.Name, func(pod *v1.Pod) { if pod.ObjectMeta.Annotations == nil { pod.ObjectMeta.Annotations = make(map[string]string) } @@ -279,11 +279,11 @@ var _ = SIGDescribe("Variable Expansion", func() { }) ginkgo.By("waiting for pod running") - err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectNoError(err, "while waiting for pod to be running") ginkgo.By("deleting the pod gracefully") - err = e2epod.DeletePodWithWait(f.ClientSet, pod) + err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "failed to delete pod") }) @@ -337,48 +337,48 @@ var _ = SIGDescribe("Variable Expansion", func() { ginkgo.By("creating the pod") podClient := e2epod.NewPodClient(f) - pod = podClient.Create(pod) + pod = podClient.Create(ctx, pod) ginkgo.By("waiting for pod running") - err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectNoError(err, "while waiting for pod to be running") ginkgo.By("creating a file in subpath") cmd := "touch /volume_mount/mypath/foo/test.log" - _, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd) + _, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd) if err != nil { framework.Failf("expected to be able to write to subpath") } ginkgo.By("test for file in mounted path") cmd = "test -f /subpath_mount/test.log" - _, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd) + _, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd) if err != nil { framework.Failf("expected to be able to verify file") } ginkgo.By("updating the annotation value") - podClient.Update(pod.ObjectMeta.Name, func(pod *v1.Pod) { + podClient.Update(ctx, pod.ObjectMeta.Name, func(pod *v1.Pod) { pod.ObjectMeta.Annotations["mysubpath"] = "mynewpath" }) ginkgo.By("waiting for annotated pod running") - err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectNoError(err, "while waiting for annotated pod to be running") ginkgo.By("deleting the pod gracefully") - err = e2epod.DeletePodWithWait(f.ClientSet, pod) + err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "failed to delete pod") }) }) -func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) { +func testPodFailSubpath(ctx context.Context, f *framework.Framework, pod *v1.Pod) { podClient := e2epod.NewPodClient(f) - pod = podClient.Create(pod) + pod = 
podClient.Create(ctx, pod) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod) - err := e2epod.WaitForPodContainerToFail(f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout) + err := e2epod.WaitForPodContainerToFail(ctx, f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout) framework.ExpectNoError(err, "while waiting for the pod container to fail") } diff --git a/test/e2e/common/node/init_container.go b/test/e2e/common/node/init_container.go index 548d25d2b25..a87230e8436 100644 --- a/test/e2e/common/node/init_container.go +++ b/test/e2e/common/node/init_container.go @@ -210,13 +210,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() { }, } framework.Logf("PodSpec: initContainers in spec.initContainers") - startedPod := podClient.Create(pod) + startedPod := podClient.Create(ctx, pod) fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String() w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.FieldSelector = fieldSelector - return podClient.Watch(context.TODO(), options) + return podClient.Watch(ctx, options) }, } var events []watch.Event @@ -291,13 +291,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() { }, } framework.Logf("PodSpec: initContainers in spec.initContainers") - startedPod := podClient.Create(pod) + startedPod := podClient.Create(ctx, pod) fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String() w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.FieldSelector = fieldSelector - return podClient.Watch(context.TODO(), options) + return podClient.Watch(ctx, options) }, } var events []watch.Event @@ -371,13 +371,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() { }, } framework.Logf("PodSpec: initContainers in spec.initContainers") - startedPod := podClient.Create(pod) + startedPod := podClient.Create(ctx, pod) fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String() w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.FieldSelector = fieldSelector - return podClient.Watch(context.TODO(), options) + return podClient.Watch(ctx, options) }, } @@ -496,13 +496,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() { }, } framework.Logf("PodSpec: initContainers in spec.initContainers") - startedPod := podClient.Create(pod) + startedPod := podClient.Create(ctx, pod) fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String() w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.FieldSelector = fieldSelector - return podClient.Watch(context.TODO(), options) + return podClient.Watch(ctx, options) }, } diff --git a/test/e2e/common/node/kubelet.go b/test/e2e/common/node/kubelet.go index 31184f4f4ac..ad6a6a369a7 100644 --- a/test/e2e/common/node/kubelet.go +++ b/test/e2e/common/node/kubelet.go @@ -50,7 +50,7 @@ var _ = SIGDescribe("Kubelet", func() { Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs. 
*/ framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) { - podClient.CreateSync(&v1.Pod{ + podClient.CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -66,9 +66,9 @@ var _ = SIGDescribe("Kubelet", func() { }, }, }) - gomega.Eventually(func() string { + gomega.Eventually(ctx, func() string { sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour))) - rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream(context.TODO()) + rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream(ctx) if err != nil { return "" } @@ -82,9 +82,9 @@ var _ = SIGDescribe("Kubelet", func() { ginkgo.Context("when scheduling a busybox command that always fails in a pod", func() { var podName string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { podName = "bin-false" + string(uuid.NewUUID()) - podClient.Create(&v1.Pod{ + podClient.Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -108,8 +108,8 @@ var _ = SIGDescribe("Kubelet", func() { Description: Create a Pod with terminated state. Pod MUST have only one container. Container MUST be in terminated state and MUST have an terminated reason. */ framework.ConformanceIt("should have an terminated reason [NodeConformance]", func(ctx context.Context) { - gomega.Eventually(func() error { - podData, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() error { + podData, err := podClient.Get(ctx, podName, metav1.GetOptions{}) if err != nil { return err } @@ -133,7 +133,7 @@ var _ = SIGDescribe("Kubelet", func() { Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted. 
*/ framework.ConformanceIt("should be possible to delete [NodeConformance]", func(ctx context.Context) { - err := podClient.Delete(context.TODO(), podName, metav1.DeleteOptions{}) + err := podClient.Delete(ctx, podName, metav1.DeleteOptions{}) gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err)) }) }) @@ -156,12 +156,12 @@ var _ = SIGDescribe("Kubelet", func() { }, } - pod = podClient.Create(pod) + pod = podClient.Create(ctx, pod) ginkgo.By("Waiting for pod completion") - err := e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err := e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) - rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(context.TODO()) + rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(ctx) framework.ExpectNoError(err) defer rc.Close() buf := new(bytes.Buffer) @@ -183,7 +183,7 @@ var _ = SIGDescribe("Kubelet", func() { */ framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func(ctx context.Context) { isReadOnly := true - podClient.CreateSync(&v1.Pod{ + podClient.CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -202,8 +202,8 @@ var _ = SIGDescribe("Kubelet", func() { }, }, }) - gomega.Eventually(func() string { - rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(context.TODO()) + gomega.Eventually(ctx, func() string { + rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(ctx) if err != nil { return "" } diff --git a/test/e2e/common/node/kubelet_etc_hosts.go b/test/e2e/common/node/kubelet_etc_hosts.go index 3e01cf557b8..14ebba369c7 100644 --- a/test/e2e/common/node/kubelet_etc_hosts.go +++ b/test/e2e/common/node/kubelet_etc_hosts.go @@ -63,7 +63,7 @@ var _ = SIGDescribe("KubeletManagedEtcHosts", func() { */ framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func(ctx context.Context) { ginkgo.By("Setting up the test") - config.setup() + config.setup(ctx) ginkgo.By("Running the test") config.verifyEtcHosts() @@ -83,22 +83,22 @@ func (config *KubeletManagedHostConfig) verifyEtcHosts() { assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2") } -func (config *KubeletManagedHostConfig) setup() { +func (config *KubeletManagedHostConfig) setup(ctx context.Context) { ginkgo.By("Creating hostNetwork=false pod") - config.createPodWithoutHostNetwork() + config.createPodWithoutHostNetwork(ctx) ginkgo.By("Creating hostNetwork=true pod") - config.createPodWithHostNetwork() + config.createPodWithHostNetwork(ctx) } -func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() { +func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork(ctx context.Context) { podSpec := config.createPodSpec(etcHostsPodName) - config.pod = e2epod.NewPodClient(config.f).CreateSync(podSpec) + config.pod = e2epod.NewPodClient(config.f).CreateSync(ctx, podSpec) } -func (config *KubeletManagedHostConfig) createPodWithHostNetwork() { +func (config *KubeletManagedHostConfig) createPodWithHostNetwork(ctx context.Context) { podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName) - config.hostNetworkPod = e2epod.NewPodClient(config.f).CreateSync(podSpec) + config.hostNetworkPod = e2epod.NewPodClient(config.f).CreateSync(ctx, podSpec) } func assertManagedStatus( diff --git a/test/e2e/common/node/lease.go b/test/e2e/common/node/lease.go index 
7bcb7d1cd3a..b3f3a6250b6 100644 --- a/test/e2e/common/node/lease.go +++ b/test/e2e/common/node/lease.go @@ -86,10 +86,10 @@ var _ = SIGDescribe("Lease", func() { }, } - createdLease, err := leaseClient.Create(context.TODO(), lease, metav1.CreateOptions{}) + createdLease, err := leaseClient.Create(ctx, lease, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating Lease failed") - readLease, err := leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) + readLease, err := leaseClient.Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "couldn't read Lease") if !apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec) { framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(lease.Spec, readLease.Spec)) @@ -103,10 +103,10 @@ var _ = SIGDescribe("Lease", func() { LeaseTransitions: pointer.Int32Ptr(1), } - _, err = leaseClient.Update(context.TODO(), createdLease, metav1.UpdateOptions{}) + _, err = leaseClient.Update(ctx, createdLease, metav1.UpdateOptions{}) framework.ExpectNoError(err, "updating Lease failed") - readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) + readLease, err = leaseClient.Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "couldn't read Lease") if !apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec) { framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(createdLease.Spec, readLease.Spec)) @@ -123,10 +123,10 @@ var _ = SIGDescribe("Lease", func() { patchBytes, err := getPatchBytes(readLease, patchedLease) framework.ExpectNoError(err, "creating patch failed") - _, err = leaseClient.Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = leaseClient.Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) framework.ExpectNoError(err, "patching Lease failed") - readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) + readLease, err = leaseClient.Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "couldn't read Lease") if !apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec) { framework.Failf("Leases don't match. 
Diff (- for expected, + for actual):\n%s", cmp.Diff(patchedLease.Spec, readLease.Spec)) @@ -146,25 +146,25 @@ var _ = SIGDescribe("Lease", func() { LeaseTransitions: pointer.Int32Ptr(0), }, } - _, err = leaseClient.Create(context.TODO(), lease2, metav1.CreateOptions{}) + _, err = leaseClient.Create(ctx, lease2, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating Lease failed") - leases, err := leaseClient.List(context.TODO(), metav1.ListOptions{}) + leases, err := leaseClient.List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "couldn't list Leases") framework.ExpectEqual(len(leases.Items), 2) selector := labels.Set(map[string]string{"deletecollection": "true"}).AsSelector() - err = leaseClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()}) + err = leaseClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err, "couldn't delete collection") - leases, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) + leases, err = leaseClient.List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "couldn't list Leases") framework.ExpectEqual(len(leases.Items), 1) - err = leaseClient.Delete(context.TODO(), name, metav1.DeleteOptions{}) + err = leaseClient.Delete(ctx, name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting Lease failed") - _, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) + _, err = leaseClient.Get(ctx, name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.Failf("expected IsNotFound error, got %#v", err) } @@ -174,7 +174,7 @@ var _ = SIGDescribe("Lease", func() { // created for every node by the corresponding Kubelet. // That said, the objects themselves are small (~300B), so even with 5000 // of them, that gives ~1.5MB, which is acceptable. 
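The lifecycle_hook.go and node_lease.go hunks below switch gomega.Eventually and gomega.Consistently to their context-aware form: the Ginkgo ctx goes in as the first argument, and the polled function may itself take a context that is used for the API call. A minimal sketch of both forms, assuming a clientset c initialised elsewhere and a placeholder node name:

```go
package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

var _ = ginkgo.Describe("context-aware polling (sketch)", func() {
	var c kubernetes.Interface   // assumed to be set up elsewhere
	const nodeName = "some-node" // placeholder

	ginkgo.It("waits for the node lease", func(ctx context.Context) {
		leaseClient := c.CoordinationV1().Leases("kube-node-lease")

		// Eventually with ctx as the first argument; the polled func also
		// takes a context, so both the polling loop and the Get stop on
		// cancellation.
		gomega.Eventually(ctx, func(ctx context.Context) error {
			_, err := leaseClient.Get(ctx, nodeName, metav1.GetOptions{})
			return err
		}, 5*time.Minute, 5*time.Second).Should(gomega.Succeed())

		// Consistently works the same way; here it asserts the lease keeps
		// resolving for a while after it first appeared.
		gomega.Consistently(ctx, func(ctx context.Context) error {
			_, err := leaseClient.Get(ctx, nodeName, metav1.GetOptions{})
			return err
		}, 30*time.Second, 5*time.Second).Should(gomega.Succeed())
	})
})
```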
- _, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) + _, err = leaseClient.List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "couldn't list Leases from all namespace") }) }) diff --git a/test/e2e/common/node/lifecycle_hook.go b/test/e2e/common/node/lifecycle_hook.go index 716d363760a..57774417066 100644 --- a/test/e2e/common/node/lifecycle_hook.go +++ b/test/e2e/common/node/lifecycle_hook.go @@ -75,8 +75,8 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { e2epod.NewAgnhostContainer("container-handle-https-request", nil, httpsPorts, httpsArgs...), ) - ginkgo.BeforeEach(func() { - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + ginkgo.BeforeEach(func(ctx context.Context) { + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) targetNode = node.Name nodeSelection := e2epod.NodeSelection{} @@ -85,16 +85,16 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { podClient = e2epod.NewPodClient(f) ginkgo.By("create the container to handle the HTTPGet hook request.") - newPod := podClient.CreateSync(podHandleHookRequest) + newPod := podClient.CreateSync(ctx, podHandleHookRequest) targetIP = newPod.Status.PodIP targetURL = targetIP if strings.Contains(targetIP, ":") { targetURL = fmt.Sprintf("[%s]", targetIP) } }) - testPodWithHook := func(podWithHook *v1.Pod) { + testPodWithHook := func(ctx context.Context, podWithHook *v1.Pod) { ginkgo.By("create the pod with lifecycle hook") - podClient.CreateSync(podWithHook) + podClient.CreateSync(ctx, podWithHook) const ( defaultHandler = iota httpsHandler @@ -107,13 +107,13 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { handlerContainer = httpsHandler } } - gomega.Eventually(func() error { - return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name, + gomega.Eventually(ctx, func(ctx context.Context) error { + return podClient.MatchContainerOutput(ctx, podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name, `GET /echo\?msg=poststart`) }, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil()) } ginkgo.By("delete the pod with lifecycle hook") - podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout) + podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout) if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil { ginkgo.By("check prestop hook") if podWithHook.Spec.Containers[0].Lifecycle.PreStop.HTTPGet != nil { @@ -121,8 +121,8 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { handlerContainer = httpsHandler } } - gomega.Eventually(func() error { - return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name, + gomega.Eventually(ctx, func(ctx context.Context) error { + return podClient.MatchContainerOutput(ctx, podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name, `GET /echo\?msg=prestop`) }, preStopWaitTimeout, podCheckInterval).Should(gomega.BeNil()) } @@ -142,7 +142,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { } podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle) - testPodWithHook(podWithHook) + testPodWithHook(ctx, podWithHook) }) /* Release: v1.9 @@ -158,7 +158,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { }, } podWithHook := 
getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle) - testPodWithHook(podWithHook) + testPodWithHook(ctx, podWithHook) }) /* Release: v1.9 @@ -180,7 +180,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { nodeSelection := e2epod.NodeSelection{} e2epod.SetAffinity(&nodeSelection, targetNode) e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection) - testPodWithHook(podWithHook) + testPodWithHook(ctx, podWithHook) }) /* Release : v1.23 @@ -203,7 +203,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { nodeSelection := e2epod.NodeSelection{} e2epod.SetAffinity(&nodeSelection, targetNode) e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection) - testPodWithHook(podWithHook) + testPodWithHook(ctx, podWithHook) }) /* Release : v1.9 @@ -225,7 +225,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { nodeSelection := e2epod.NodeSelection{} e2epod.SetAffinity(&nodeSelection, targetNode) e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection) - testPodWithHook(podWithHook) + testPodWithHook(ctx, podWithHook) }) /* Release : v1.23 @@ -248,7 +248,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { nodeSelection := e2epod.NodeSelection{} e2epod.SetAffinity(&nodeSelection, targetNode) e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection) - testPodWithHook(podWithHook) + testPodWithHook(ctx, podWithHook) }) }) }) diff --git a/test/e2e/common/node/node_lease.go b/test/e2e/common/node/node_lease.go index 7cd0fdb385f..5e3c5c7e8f8 100644 --- a/test/e2e/common/node/node_lease.go +++ b/test/e2e/common/node/node_lease.go @@ -42,8 +42,8 @@ var _ = SIGDescribe("NodeLease", func() { f := framework.NewDefaultFramework("node-lease-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + ginkgo.BeforeEach(func(ctx context.Context) { + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) nodeName = node.Name }) @@ -56,8 +56,8 @@ var _ = SIGDescribe("NodeLease", func() { lease *coordinationv1.Lease ) ginkgo.By("check that lease for this Kubelet exists in the kube-node-lease namespace") - gomega.Eventually(func() error { - lease, err = leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() error { + lease, err = leaseClient.Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -67,8 +67,8 @@ var _ = SIGDescribe("NodeLease", func() { gomega.Expect(expectLease(lease, nodeName)).To(gomega.BeNil()) ginkgo.By("check that node lease is updated at least once within the lease duration") - gomega.Eventually(func() error { - newLease, err := leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() error { + newLease, err := leaseClient.Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -93,8 +93,8 @@ var _ = SIGDescribe("NodeLease", func() { err error leaseList *coordinationv1.LeaseList ) - gomega.Eventually(func() error { - leaseList, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) + gomega.Eventually(ctx, func() error { + leaseList, err = leaseClient.List(ctx, metav1.ListOptions{}) if err != nil { return err } @@ -113,13 +113,13 @@ var _ = SIGDescribe("NodeLease", func() { ginkgo.It("the kubelet should report node status infrequently", func(ctx context.Context) { ginkgo.By("wait until node is ready") - e2enode.WaitForNodeToBeReady(f.ClientSet, 
nodeName, 5*time.Minute) + e2enode.WaitForNodeToBeReady(ctx, f.ClientSet, nodeName, 5*time.Minute) ginkgo.By("wait until there is node lease") var err error var lease *coordinationv1.Lease - gomega.Eventually(func() error { - lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(context.TODO(), nodeName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() error { + lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -134,10 +134,10 @@ var _ = SIGDescribe("NodeLease", func() { // enough time has passed. So for here, keep checking the time diff // between 2 NodeStatus report, until it is longer than lease duration // (the same as nodeMonitorGracePeriod), or it doesn't change for at least leaseDuration - lastHeartbeatTime, lastStatus := getHeartbeatTimeAndStatus(f.ClientSet, nodeName) + lastHeartbeatTime, lastStatus := getHeartbeatTimeAndStatus(ctx, f.ClientSet, nodeName) lastObserved := time.Now() err = wait.Poll(time.Second, 5*time.Minute, func() (bool, error) { - currentHeartbeatTime, currentStatus := getHeartbeatTimeAndStatus(f.ClientSet, nodeName) + currentHeartbeatTime, currentStatus := getHeartbeatTimeAndStatus(ctx, f.ClientSet, nodeName) currentObserved := time.Now() if currentHeartbeatTime == lastHeartbeatTime { @@ -178,7 +178,7 @@ var _ = SIGDescribe("NodeLease", func() { // This check on node status is only meaningful when this e2e test is // running as cluster e2e test, because node e2e test does not create and // run controller manager, i.e., no node lifecycle controller. - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) _, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady) framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue) @@ -186,8 +186,8 @@ var _ = SIGDescribe("NodeLease", func() { }) }) -func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) { - node, err := clientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func getHeartbeatTimeAndStatus(ctx context.Context, clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) { + node, err := clientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) _, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady) framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue) diff --git a/test/e2e/common/node/pod_admission.go b/test/e2e/common/node/pod_admission.go index 5ec022528f8..2129ecee6f8 100644 --- a/test/e2e/common/node/pod_admission.go +++ b/test/e2e/common/node/pod_admission.go @@ -37,7 +37,7 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Context("Kubelet", func() { ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func(ctx context.Context) { - linuxNode, err := findLinuxNode(f) + linuxNode, err := findLinuxNode(ctx, f) framework.ExpectNoError(err) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -57,18 +57,18 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() { NodeName: linuxNode.Name, // Set the node to an node which doesn't support }, } - pod = e2epod.NewPodClient(f).Create(pod) + pod = e2epod.NewPodClient(f).Create(ctx, pod) // Check the pod is still 
not running - err = e2epod.WaitForPodFailedReason(f.ClientSet, pod, "PodOSNotSupported", f.Timeouts.PodStartShort) + err = e2epod.WaitForPodFailedReason(ctx, f.ClientSet, pod, "PodOSNotSupported", f.Timeouts.PodStartShort) framework.ExpectNoError(err) }) }) }) // findLinuxNode finds a Linux node that is Ready and Schedulable -func findLinuxNode(f *framework.Framework) (v1.Node, error) { +func findLinuxNode(ctx context.Context, f *framework.Framework) (v1.Node, error) { selector := labels.Set{"kubernetes.io/os": "linux"}.AsSelector() - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return v1.Node{}, err diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go index b017a2a80b6..698e032d003 100644 --- a/test/e2e/common/node/pods.go +++ b/test/e2e/common/node/pods.go @@ -69,15 +69,15 @@ const ( ) // testHostIP tests that a pod gets a host IP -func testHostIP(podClient *e2epod.PodClient, pod *v1.Pod) { +func testHostIP(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod) { ginkgo.By("creating pod") - podClient.CreateSync(pod) + podClient.CreateSync(ctx, pod) // Try to make sure we get a hostIP for each pod. hostIPTimeout := 2 * time.Minute t := time.Now() for { - p, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + p, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) if p.Status.HostIP != "" { framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP) @@ -92,40 +92,40 @@ func testHostIP(podClient *e2epod.PodClient, pod *v1.Pod) { } } -func startPodAndGetBackOffs(podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) { - podClient.CreateSync(pod) +func startPodAndGetBackOffs(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) { + podClient.CreateSync(ctx, pod) time.Sleep(sleepAmount) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) podName := pod.Name containerName := pod.Spec.Containers[0].Name ginkgo.By("getting restart delay-0") - _, err := getRestartDelay(podClient, podName, containerName) + _, err := getRestartDelay(ctx, podClient, podName, containerName) if err != nil { framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } ginkgo.By("getting restart delay-1") - delay1, err := getRestartDelay(podClient, podName, containerName) + delay1, err := getRestartDelay(ctx, podClient, podName, containerName) if err != nil { framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } ginkgo.By("getting restart delay-2") - delay2, err := getRestartDelay(podClient, podName, containerName) + delay2, err := getRestartDelay(ctx, podClient, podName, containerName) if err != nil { framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } return delay1, delay2 } -func getRestartDelay(podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) { +func getRestartDelay(ctx context.Context, podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) { beginTime := time.Now() var previousRestartCount int32 = -1 var previousFinishedAt time.Time for time.Since(beginTime) < (2 * 
maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay time.Sleep(time.Second) - pod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) + pod, err := podClient.Get(ctx, podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName) if !ok { @@ -171,6 +171,7 @@ func getRestartDelay(podClient *e2epod.PodClient, podName string, containerName // expectNoErrorWithRetries checks if an error occurs with the given retry count. func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) { + // TODO (pohly): replace the entire function with gomege.Eventually. var err error for i := 0; i < maxRetries; i++ { err = fn() @@ -203,7 +204,7 @@ var _ = SIGDescribe("Pods", func() { */ framework.ConformanceIt("should get a host IP [NodeConformance]", func(ctx context.Context) { name := "pod-hostip-" + string(uuid.NewUUID()) - testHostIP(podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ + testHostIP(ctx, podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -248,37 +249,37 @@ var _ = SIGDescribe("Pods", func() { ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 0) lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = selector.String() - podList, err := podClient.List(context.TODO(), options) + podList, err := podClient.List(ctx, options) return podList, err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = selector.String() - return podClient.Watch(context.TODO(), options) + return podClient.Watch(ctx, options) }, } _, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{}) defer w.Stop() - ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout) + ctxUntil, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout) defer cancelCtx() - if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { + if !cache.WaitForCacheSync(ctxUntil.Done(), informer.HasSynced) { framework.Failf("Timeout while waiting to Pod informer to sync") } ginkgo.By("submitting the pod to kubernetes") - podClient.Create(pod) + podClient.Create(ctx, pod) ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(context.TODO(), options) + pods, err = podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 1) @@ -294,13 +295,13 @@ var _ = SIGDescribe("Pods", func() { // We need to wait for the pod to be running, otherwise the deletion // may be carried out immediately rather than gracefully. 
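The pods.go hunk above renames the derived context to ctxUntil so that the short, bounded context used only for the informer cache sync no longer shadows the Ginkgo ctx that the later API calls rely on. A sketch of that idiom, with the informer assumed to be constructed elsewhere and syncThenGet being a hypothetical wrapper:

```go
package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// syncThenGet shows the renaming idiom: derive a bounded context under a new
// name (ctxUntil) for the cache sync, and keep using the original test ctx
// for the API calls that follow, instead of shadowing it.
func syncThenGet(ctx context.Context, c kubernetes.Interface, informer cache.SharedIndexInformer, ns, name string) error {
	ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	if !cache.WaitForCacheSync(ctxUntil.Done(), informer.HasSynced) {
		return fmt.Errorf("timed out waiting for informer cache to sync")
	}

	// Later calls still use the full test context, not the bounded one.
	_, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	return err
}
```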
- framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) // save the running pod - pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to GET scheduled pod") ginkgo.By("deleting the pod gracefully") - err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30)) + err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(30)) framework.ExpectNoError(err, "failed to delete pod") ginkgo.By("verifying pod deletion was observed") @@ -331,7 +332,7 @@ var _ = SIGDescribe("Pods", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(context.TODO(), options) + pods, err = podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 0) }) @@ -364,27 +365,27 @@ var _ = SIGDescribe("Pods", func() { }) ginkgo.By("submitting the pod to kubernetes") - pod = podClient.CreateSync(pod) + pod = podClient.CreateSync(ctx, pod) ginkgo.By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 1) ginkgo.By("updating the pod") - podClient.Update(name, func(pod *v1.Pod) { + podClient.Update(ctx, name, func(pod *v1.Pod) { value = strconv.Itoa(time.Now().Nanosecond()) pod.Labels["time"] = value }) - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) ginkgo.By("verifying the updated pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(context.TODO(), options) + pods, err = podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 1) framework.Logf("Pod update OK") @@ -418,22 +419,22 @@ var _ = SIGDescribe("Pods", func() { }) ginkgo.By("submitting the pod to kubernetes") - podClient.CreateSync(pod) + podClient.CreateSync(ctx, pod) ginkgo.By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set{"time": value}) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 1) ginkgo.By("updating the pod") - podClient.Update(name, func(pod *v1.Pod) { + podClient.Update(ctx, name, func(pod *v1.Pod) { newDeadline := int64(5) pod.Spec.ActiveDeadlineSeconds = &newDeadline }) - framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, pod.Name, "DeadlineExceeded", f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, pod.Name, "DeadlineExceeded", f.Namespace.Name)) 
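For orientation alongside the hunks above: the recurring change in pods.go is that the spec callback itself receives ctx (func(ctx context.Context)) and forwards it to the pod client and the e2epod wait helpers instead of calling context.TODO(). A minimal self-contained sketch of that shape follows; the spec text, pod name and sleep command are illustrative and not taken from the patch, only the call signatures mirror the ones above.

package node // assumed: matches the package of the files above

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

var _ = ginkgo.Describe("ctx plumbing (sketch)", func() {
	f := framework.NewDefaultFramework("ctx-sketch")

	ginkgo.It("threads the Ginkgo context through helpers", func(ctx context.Context) {
		podClient := e2epod.NewPodClient(f)

		// Illustrative pod spec; MustMixinRestrictedPodSecurity and the agnhost
		// image mirror what the surrounding tests use.
		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "ctx-demo-" + string(uuid.NewUUID())},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:    "ctx-demo",
					Image:   imageutils.GetE2EImage(imageutils.Agnhost),
					Command: []string{"/bin/sh", "-c", "while true; do sleep 1; done"},
				}},
			},
		})

		// Every call takes the spec-scoped ctx, so API requests and waits share
		// its cancellation and deadline.
		pod = podClient.CreateSync(ctx, pod)
		framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))

		_, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, "failed to get pod %q", pod.Name)
	})
})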
}) /* @@ -460,7 +461,7 @@ var _ = SIGDescribe("Pods", func() { }, }, }) - podClient.CreateSync(serverPod) + podClient.CreateSync(ctx, serverPod) // This service exposes port 8080 of the test pod as a service on port 8765 // TODO(filbranden): We would like to use a unique service name such as: @@ -487,7 +488,7 @@ var _ = SIGDescribe("Pods", func() { }, }, } - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create service") // Make a client pod that verifies that it has the service environment variables. @@ -523,7 +524,7 @@ var _ = SIGDescribe("Pods", func() { "FOOSERVICE_PORT_8765_TCP_ADDR=", } expectNoErrorWithRetries(func() error { - return e2epodoutput.MatchContainerOutput(f, pod, containerName, expectedVars, gomega.ContainSubstring) + return e2epodoutput.MatchContainerOutput(ctx, f, pod, containerName, expectedVars, gomega.ContainSubstring) }, maxRetries, "Container should have service environment variables set") }) @@ -555,7 +556,7 @@ var _ = SIGDescribe("Pods", func() { }) ginkgo.By("submitting the pod to kubernetes") - pod = podClient.CreateSync(pod) + pod = podClient.CreateSync(ctx, pod) req := f.ClientSet.CoreV1().RESTClient().Get(). Namespace(f.Namespace.Name). @@ -576,7 +577,7 @@ var _ = SIGDescribe("Pods", func() { defer ws.Close() buf := &bytes.Buffer{} - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { for { var msg []byte if err := websocket.Message.Receive(ws, &msg); err != nil { @@ -637,7 +638,7 @@ var _ = SIGDescribe("Pods", func() { }) ginkgo.By("submitting the pod to kubernetes") - podClient.CreateSync(pod) + podClient.CreateSync(ctx, pod) req := f.ClientSet.CoreV1().RESTClient().Get(). Namespace(f.Namespace.Name). 
@@ -692,18 +693,18 @@ var _ = SIGDescribe("Pods", func() { }, }) - delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration) + delay1, delay2 := startPodAndGetBackOffs(ctx, podClient, pod, buildBackOffDuration) ginkgo.By("updating the image") - podClient.Update(podName, func(pod *v1.Pod) { + podClient.Update(ctx, podName, func(pod *v1.Pod) { pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx) }) time.Sleep(syncLoopFrequency) - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) ginkgo.By("get restart delay after image update") - delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName) + delayAfterUpdate, err := getRestartDelay(ctx, podClient, podName, containerName) if err != nil { framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } @@ -733,7 +734,7 @@ var _ = SIGDescribe("Pods", func() { }, }) - podClient.CreateSync(pod) + podClient.CreateSync(ctx, pod) time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x // wait for a delay == capped delay of MaxContainerBackOff @@ -743,7 +744,7 @@ var _ = SIGDescribe("Pods", func() { err error ) for i := 0; i < 3; i++ { - delay1, err = getRestartDelay(podClient, podName, containerName) + delay1, err = getRestartDelay(ctx, podClient, podName, containerName) if err != nil { framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } @@ -758,7 +759,7 @@ var _ = SIGDescribe("Pods", func() { } ginkgo.By("getting restart delay after a capped delay") - delay2, err := getRestartDelay(podClient, podName, containerName) + delay2, err := getRestartDelay(ctx, podClient, podName, containerName) if err != nil { framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } @@ -795,7 +796,7 @@ var _ = SIGDescribe("Pods", func() { validatePodReadiness := func(expectReady bool) { err := wait.Poll(time.Second, time.Minute, func() (bool, error) { - pod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) + pod, err := podClient.Get(ctx, podName, metav1.GetOptions{}) framework.ExpectNoError(err) podReady := podutils.IsPodReady(pod) res := expectReady == podReady @@ -808,29 +809,29 @@ var _ = SIGDescribe("Pods", func() { } ginkgo.By("submitting the pod to kubernetes") - e2epod.NewPodClient(f).Create(pod) - e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) - if podClient.PodIsReady(podName) { + e2epod.NewPodClient(f).Create(ctx, pod) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) + if podClient.PodIsReady(ctx, podName) { framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name) } ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1)) - _, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status") + _, err := podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) // Sleep for 10 seconds. 
time.Sleep(syncLoopFrequency) // Verify the pod is still not ready - if podClient.PodIsReady(podName) { + if podClient.PodIsReady(ctx, podName) { framework.Failf("Expect pod(%s/%s)'s Ready condition to be false with only one condition in readinessGates equal to True", f.Namespace.Name, pod.Name) } ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2)) - _, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status") + _, err = podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) validatePodReadiness(true) ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1)) - _, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), metav1.PatchOptions{}, "status") + _, err = podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) validatePodReadiness(false) @@ -850,7 +851,7 @@ var _ = SIGDescribe("Pods", func() { ginkgo.By("Create set of pods") // create a set of pods in test namespace for _, podTestName := range podTestNames { - _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podTestName, @@ -872,17 +873,17 @@ var _ = SIGDescribe("Pods", func() { // wait as required for all 3 pods to be running ginkgo.By("waiting for all 3 pods to be running") - err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil) + err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil) framework.ExpectNoError(err, "3 pods not found running.") // delete Collection of pods with a label in the current namespace - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{ + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{ LabelSelector: "type=Testing"}) framework.ExpectNoError(err, "failed to delete collection of pods") // wait for all pods to be deleted ginkgo.By("waiting for all pods to be deleted") - err = wait.PollImmediate(podRetryPeriod, f.Timeouts.PodDelete, checkPodListQuantity(f, "type=Testing", 0)) + err = wait.PollImmediateWithContext(ctx, podRetryPeriod, f.Timeouts.PodDelete, checkPodListQuantity(f, "type=Testing", 0)) framework.ExpectNoError(err, "found a pod(s)") }) @@ -906,10 +907,10 @@ var _ = SIGDescribe("Pods", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = testPodLabelsFlat - return f.ClientSet.CoreV1().Pods(testNamespaceName).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Pods(testNamespaceName).Watch(ctx, options) }, } - podsList, err := f.ClientSet.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{LabelSelector: testPodLabelsFlat}) + podsList, err := f.ClientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{LabelSelector: testPodLabelsFlat}) 
framework.ExpectNoError(err, "failed to list Pods") testPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ @@ -928,13 +929,13 @@ var _ = SIGDescribe("Pods", func() { }, }) ginkgo.By("creating a Pod with a static label") - _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(context.TODO(), testPod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(ctx, testPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName) ginkgo.By("watching for Pod to be ready") - ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart) + ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() - _, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if pod, ok := event.Object.(*v1.Pod); ok { found := pod.ObjectMeta.Name == testPod.ObjectMeta.Name && pod.ObjectMeta.Namespace == testNamespaceName && @@ -953,7 +954,7 @@ var _ = SIGDescribe("Pods", func() { if err != nil { framework.Logf("failed to see event that pod is created: %v", err) } - p, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}) + p, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get Pod %v in namespace %v", testPodName, testNamespaceName) framework.ExpectEqual(p.Status.Phase, v1.PodRunning, "failed to see Pod %v in namespace %v running", p.ObjectMeta.Name, testNamespaceName) @@ -972,11 +973,11 @@ var _ = SIGDescribe("Pods", func() { }, }) framework.ExpectNoError(err, "failed to marshal JSON patch for Pod") - _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Patch(context.TODO(), testPodName, types.StrategicMergePatchType, []byte(podPatch), metav1.PatchOptions{}) + _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Patch(ctx, testPodName, types.StrategicMergePatchType, []byte(podPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch Pod %s in namespace %s", testPodName, testNamespaceName) - ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, prePatchResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, prePatchResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Modified: if pod, ok := event.Object.(*v1.Pod); ok { @@ -994,7 +995,7 @@ var _ = SIGDescribe("Pods", func() { } ginkgo.By("getting the Pod and ensuring that it's patched") - pod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch Pod %s in namespace %s", testPodName, testNamespaceName) framework.ExpectEqual(pod.ObjectMeta.Labels["test-pod"], "patched", "failed to patch Pod - missing label") framework.ExpectEqual(pod.Spec.Containers[0].Image, testPodImage2, "failed to patch Pod - wrong image") @@ -1003,7 +1004,7 @@ var _ = SIGDescribe("Pods", func() { var podStatusUpdate *v1.Pod err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - podStatusUnstructured, err := 
dc.Resource(podResource).Namespace(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}, "status") + podStatusUnstructured, err := dc.Resource(podResource).Namespace(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "failed to fetch PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName) podStatusBytes, err := json.Marshal(podStatusUnstructured) framework.ExpectNoError(err, "failed to marshal unstructured response") @@ -1020,7 +1021,7 @@ var _ = SIGDescribe("Pods", func() { } } framework.ExpectEqual(podStatusFieldPatchCount, podStatusFieldPatchCountTotal, "failed to patch all relevant Pod conditions") - podStatusUpdate, err = f.ClientSet.CoreV1().Pods(testNamespaceName).UpdateStatus(context.TODO(), &podStatusUpdated, metav1.UpdateOptions{}) + podStatusUpdate, err = f.ClientSet.CoreV1().Pods(testNamespaceName).UpdateStatus(ctx, &podStatusUpdated, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "failed to update PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName) @@ -1037,13 +1038,13 @@ var _ = SIGDescribe("Pods", func() { ginkgo.By("deleting the Pod via a Collection with a LabelSelector") preDeleteResourceVersion := podStatusUpdate.ResourceVersion - err = f.ClientSet.CoreV1().Pods(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testPodLabelsFlat}) + err = f.ClientSet.CoreV1().Pods(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testPodLabelsFlat}) framework.ExpectNoError(err, "failed to delete Pod by collection") ginkgo.By("watching for the Pod to be deleted") - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Minute) + ctxUntil, cancel = context.WithTimeout(ctx, 1*time.Minute) defer cancel() - _, err = watchtools.Until(ctx, preDeleteResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, preDeleteResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: if pod, ok := event.Object.(*v1.Pod); ok { @@ -1061,7 +1062,7 @@ var _ = SIGDescribe("Pods", func() { if err != nil { framework.Logf("failed to see %v event: %v", watch.Deleted, err) } - postDeletePod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}) + postDeletePod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{}) var postDeletePodJSON []byte if postDeletePod != nil { postDeletePodJSON, _ = json.Marshal(postDeletePod) @@ -1102,9 +1103,9 @@ var _ = SIGDescribe("Pods", func() { }, }, }) - pod, err := podClient.Create(context.TODO(), testPod, metav1.CreateOptions{}) + pod, err := podClient.Create(ctx, testPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, ns) - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "Pod didn't start within time out period") + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod), "Pod didn't start within time out period") ginkgo.By("patching /status") podStatus := v1.PodStatus{ @@ -1114,7 +1115,7 @@ var _ = SIGDescribe("Pods", func() { pStatusJSON, err := json.Marshal(podStatus) framework.ExpectNoError(err, "Failed to marshal. 
%v", podStatus) - pStatus, err := podClient.Patch(context.TODO(), podName, types.MergePatchType, + pStatus, err := podClient.Patch(ctx, podName, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(pStatusJSON)+`}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "failed to patch pod: %q", podName) @@ -1124,11 +1125,11 @@ var _ = SIGDescribe("Pods", func() { }) }) -func checkPodListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) { - return func() (bool, error) { +func checkPodListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { var err error - list, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + list, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: label}) if err != nil { diff --git a/test/e2e/common/node/podtemplates.go b/test/e2e/common/node/podtemplates.go index bf11b7148ac..dd1cd2be6ac 100644 --- a/test/e2e/common/node/podtemplates.go +++ b/test/e2e/common/node/podtemplates.go @@ -55,14 +55,14 @@ var _ = SIGDescribe("PodTemplates", func() { podTemplateName := "nginx-pod-template-" + string(uuid.NewUUID()) // get a list of PodTemplates (in all namespaces to hit endpoint) - podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{ + podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(ctx, metav1.ListOptions{ LabelSelector: "podtemplate-static=true", }) framework.ExpectNoError(err, "failed to list all PodTemplates") framework.ExpectEqual(len(podTemplateList.Items), 0, "unable to find templates") // create a PodTemplate - _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(context.TODO(), &v1.PodTemplate{ + _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(ctx, &v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: podTemplateName, Labels: map[string]string{ @@ -80,7 +80,7 @@ var _ = SIGDescribe("PodTemplates", func() { framework.ExpectNoError(err, "failed to create PodTemplate") // get template - podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{}) + podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(ctx, podTemplateName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get created PodTemplate") framework.ExpectEqual(podTemplateRead.ObjectMeta.Name, podTemplateName) @@ -93,20 +93,20 @@ var _ = SIGDescribe("PodTemplates", func() { }, }) framework.ExpectNoError(err, "failed to marshal patch data") - _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{}) + _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(ctx, podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch PodTemplate") // get template (ensure label is there) - podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{}) + podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(ctx, podTemplateName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get PodTemplate") 
framework.ExpectEqual(podTemplateRead.ObjectMeta.Labels["podtemplate"], "patched", "failed to patch template, new label not found") // delete the PodTemplate - err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(ctx, podTemplateName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete PodTemplate") // list the PodTemplates - podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{ + podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(ctx, metav1.ListOptions{ LabelSelector: "podtemplate-static=true", }) framework.ExpectNoError(err, "failed to list PodTemplate") @@ -125,7 +125,7 @@ var _ = SIGDescribe("PodTemplates", func() { ginkgo.By("Create set of pod templates") // create a set of pod templates in test namespace for _, podTemplateName := range podTemplateNames { - _, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).Create(context.TODO(), &v1.PodTemplate{ + _, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).Create(ctx, &v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: podTemplateName, Labels: map[string]string{"podtemplate-set": "true"}, @@ -144,7 +144,7 @@ var _ = SIGDescribe("PodTemplates", func() { ginkgo.By("get a list of pod templates with a label in the current namespace") // get a list of pod templates - podTemplateList, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + podTemplateList, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: "podtemplate-set=true", }) framework.ExpectNoError(err, "failed to get a list of pod templates") @@ -155,13 +155,13 @@ var _ = SIGDescribe("PodTemplates", func() { // delete collection framework.Logf("requesting DeleteCollection of pod templates") - err = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ + err = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: "podtemplate-set=true"}) framework.ExpectNoError(err, "failed to delete all pod templates") ginkgo.By("check that the list of pod templates matches the requested quantity") - err = wait.PollImmediate(podTemplateRetryPeriod, podTemplateRetryTimeout, checkPodTemplateListQuantity(f, "podtemplate-set=true", 0)) + err = wait.PollImmediate(podTemplateRetryPeriod, podTemplateRetryTimeout, checkPodTemplateListQuantity(ctx, f, "podtemplate-set=true", 0)) framework.ExpectNoError(err, "failed to count required pod templates") }) @@ -178,7 +178,7 @@ var _ = SIGDescribe("PodTemplates", func() { ptName := "podtemplate-" + utilrand.String(5) ginkgo.By("Create a pod template") - ptResource, err := ptClient.Create(context.TODO(), &v1.PodTemplate{ + ptResource, err := ptClient.Create(ctx, &v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: ptName, }, @@ -196,12 +196,12 @@ var _ = SIGDescribe("PodTemplates", func() { var updatedPT *v1.PodTemplate err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - ptResource, err = ptClient.Get(context.TODO(), ptName, metav1.GetOptions{}) + ptResource, err = ptClient.Get(ctx, ptName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod template %s", ptName) ptResource.Annotations = map[string]string{ "updated": "true", } - updatedPT, err = 
ptClient.Update(context.TODO(), ptResource, metav1.UpdateOptions{}) + updatedPT, err = ptClient.Update(ctx, ptResource, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err) @@ -211,13 +211,13 @@ var _ = SIGDescribe("PodTemplates", func() { }) -func checkPodTemplateListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) { +func checkPodTemplateListQuantity(ctx context.Context, f *framework.Framework, label string, quantity int) func() (bool, error) { return func() (bool, error) { var err error framework.Logf("requesting list of pod templates to confirm quantity") - list, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + list, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: label}) if err != nil { diff --git a/test/e2e/common/node/privileged.go b/test/e2e/common/node/privileged.go index 5d0db7718a2..8212764b2e0 100644 --- a/test/e2e/common/node/privileged.go +++ b/test/e2e/common/node/privileged.go @@ -54,7 +54,7 @@ var _ = SIGDescribe("PrivilegedPod [NodeConformance]", func() { ginkgo.It("should enable privileged commands [LinuxOnly]", func(ctx context.Context) { // Windows does not support privileged containers. ginkgo.By("Creating a pod with a privileged container") - config.createPods() + config.createPods(ctx) ginkgo.By("Executing in the privileged container") config.run(config.privilegedContainer, true) @@ -115,7 +115,7 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod { } } -func (c *PrivilegedPodTestConfig) createPods() { +func (c *PrivilegedPodTestConfig) createPods(ctx context.Context) { podSpec := c.createPodsSpec() - c.pod = e2epod.NewPodClient(c.f).CreateSync(podSpec) + c.pod = e2epod.NewPodClient(c.f).CreateSync(ctx, podSpec) } diff --git a/test/e2e/common/node/runtime.go b/test/e2e/common/node/runtime.go index f0261f4ebb5..9d2c1475ef5 100644 --- a/test/e2e/common/node/runtime.go +++ b/test/e2e/common/node/runtime.go @@ -104,32 +104,32 @@ while true; do sleep 1; done RestartPolicy: testCase.RestartPolicy, Volumes: testVolumes, } - terminateContainer.Create() + terminateContainer.Create(ctx) ginkgo.DeferCleanup(framework.IgnoreNotFound(terminateContainer.Delete)) ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name)) - gomega.Eventually(func() (int32, error) { - status, err := terminateContainer.GetStatus() + gomega.Eventually(ctx, func() (int32, error) { + status, err := terminateContainer.GetStatus(ctx) return status.RestartCount, err }, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.RestartCount)) ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name)) - gomega.Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase)) + gomega.Eventually(ctx, terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase)) ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name)) - isReady, err := terminateContainer.IsReady() + isReady, err := terminateContainer.IsReady(ctx) framework.ExpectEqual(isReady, testCase.Ready) framework.ExpectNoError(err) - status, err := terminateContainer.GetStatus() + status, err := terminateContainer.GetStatus(ctx) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Container '%s': should get the 
expected 'State'", testContainer.Name)) framework.ExpectEqual(GetContainerState(status.State), testCase.State) ginkgo.By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name)) - gomega.Expect(terminateContainer.Delete()).To(gomega.Succeed()) - gomega.Eventually(terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.BeFalse()) + gomega.Expect(terminateContainer.Delete(ctx)).To(gomega.Succeed()) + gomega.Eventually(ctx, terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.BeFalse()) } }) }) @@ -141,7 +141,7 @@ while true; do sleep 1; done nonAdminUserName := "ContainerUser" // Create and then terminate the container under defined PodPhase to verify if termination message matches the expected output. Lastly delete the created container. - matchTerminationMessage := func(container v1.Container, expectedPhase v1.PodPhase, expectedMsg gomegatypes.GomegaMatcher) { + matchTerminationMessage := func(ctx context.Context, container v1.Container, expectedPhase v1.PodPhase, expectedMsg gomegatypes.GomegaMatcher) { container.Name = "termination-message-container" c := ConformanceContainer{ PodClient: e2epod.NewPodClient(f), @@ -150,14 +150,14 @@ while true; do sleep 1; done } ginkgo.By("create the container") - c.Create() + c.Create(ctx) ginkgo.DeferCleanup(framework.IgnoreNotFound(c.Delete)) ginkgo.By(fmt.Sprintf("wait for the container to reach %s", expectedPhase)) - gomega.Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase)) + gomega.Eventually(ctx, c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase)) ginkgo.By("get the container status") - status, err := c.GetStatus() + status, err := c.GetStatus(ctx) framework.ExpectNoError(err) ginkgo.By("the container should be terminated") @@ -168,7 +168,7 @@ while true; do sleep 1; done gomega.Expect(status.State.Terminated.Message).Should(expectedMsg) ginkgo.By("delete the container") - gomega.Expect(c.Delete()).To(gomega.Succeed()) + gomega.Expect(c.Delete(ctx)).To(gomega.Succeed()) } ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func(ctx context.Context) { @@ -184,7 +184,7 @@ while true; do sleep 1; done } else { container.SecurityContext.RunAsUser = &rootUser } - matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("DONE")) + matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("DONE")) }) /* @@ -205,7 +205,7 @@ while true; do sleep 1; done } else { container.SecurityContext.RunAsUser = &nonRootUser } - matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("DONE")) + matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("DONE")) }) /* @@ -221,7 +221,7 @@ while true; do sleep 1; done TerminationMessagePath: "/dev/termination-log", TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, } - matchTerminationMessage(container, v1.PodFailed, gomega.Equal("DONE")) + matchTerminationMessage(ctx, container, v1.PodFailed, gomega.Equal("DONE")) }) /* @@ -237,7 +237,7 @@ while true; do sleep 1; done TerminationMessagePath: "/dev/termination-log", TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, } - matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("")) + matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("")) }) /* @@ -253,7 +253,7 @@ while true; 
do sleep 1; done TerminationMessagePath: "/dev/termination-log", TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, } - matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("OK")) + matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("OK")) }) }) @@ -262,7 +262,7 @@ while true; do sleep 1; done // Images used for ConformanceContainer are not added into NodePrePullImageList, because this test is // testing image pulling, these images don't need to be prepulled. The ImagePullPolicy // is v1.PullAlways, so it won't be blocked by framework image pre-pull list check. - imagePullTest := func(image string, hasSecret bool, expectedPhase v1.PodPhase, expectedPullStatus bool, windowsImage bool) { + imagePullTest := func(ctx context.Context, image string, hasSecret bool, expectedPhase v1.PodPhase, expectedPullStatus bool, windowsImage bool) { command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"} if windowsImage { // -t: Ping the specified host until stopped. @@ -301,14 +301,14 @@ while true; do sleep 1; done } secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) ginkgo.By("create image pull secret") - _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete, secret.Name, metav1.DeleteOptions{}) container.ImagePullSecrets = []string{secret.Name} } // checkContainerStatus checks whether the container status matches expectation. - checkContainerStatus := func() error { - status, err := container.GetStatus() + checkContainerStatus := func(ctx context.Context) error { + status, err := container.GetStatus(ctx) if err != nil { return fmt.Errorf("failed to get container status: %v", err) } @@ -333,7 +333,7 @@ while true; do sleep 1; done } } // Check pod phase - phase, err := container.GetPhase() + phase, err := container.GetPhase(ctx) if err != nil { return fmt.Errorf("failed to get pod phase: %v", err) } @@ -348,15 +348,15 @@ while true; do sleep 1; done for i := 1; i <= flakeRetry; i++ { var err error ginkgo.By("create the container") - container.Create() + container.Create(ctx) ginkgo.By("check the container status") for start := time.Now(); time.Since(start) < ContainerStatusRetryTimeout; time.Sleep(ContainerStatusPollInterval) { - if err = checkContainerStatus(); err == nil { + if err = checkContainerStatus(ctx); err == nil { break } } ginkgo.By("delete the container") - container.Delete() + _ = container.Delete(ctx) if err == nil { break } @@ -370,18 +370,18 @@ while true; do sleep 1; done ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.InvalidRegistryImage) - imagePullTest(image, false, v1.PodPending, true, false) + imagePullTest(ctx, image, false, v1.PodPending, true, false) }) ginkgo.It("should be able to pull image [NodeConformance]", func(ctx context.Context) { // NOTE(claudiub): The agnhost image is supposed to work on both Linux and Windows. 
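The runtime.go hunks apply the same treatment to helpers defined as closures: matchTerminationMessage, imagePullTest and checkContainerStatus gain ctx as their first parameter, and gomega.Eventually is given ctx ahead of the polled function, which may itself accept a context (as terminateContainer.GetPhase now does). A small sketch with hypothetical names and timeouts, assuming the gomega, time and v1 imports already present in the file:

// Sketch constants standing in for ContainerStatusPollInterval and
// ContainerStatusRetryTimeout used above.
const (
	sketchPollInterval = 2 * time.Second
	sketchPollTimeout  = 5 * time.Minute
)

// waitForPhase is a sketch, not part of the patch: a ctx-first helper polled by
// a context-aware gomega.Eventually. getPhase stands in for methods such as
// ConformanceContainer.GetPhase after their conversion to take ctx.
func waitForPhase(ctx context.Context, getPhase func(context.Context) (v1.PodPhase, error), want v1.PodPhase) {
	// Eventually receives ctx first; getPhase is handed a context on every poll
	// and polling stops once ctx is done.
	gomega.Eventually(ctx, getPhase, sketchPollTimeout, sketchPollInterval).Should(gomega.Equal(want))
}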
image := imageutils.GetE2EImage(imageutils.Agnhost) - imagePullTest(image, false, v1.PodRunning, false, false) + imagePullTest(ctx, image, false, v1.PodRunning, false, false) }) ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine) - imagePullTest(image, false, v1.PodPending, true, false) + imagePullTest(ctx, image, false, v1.PodPending, true, false) }) ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func(ctx context.Context) { @@ -391,7 +391,7 @@ while true; do sleep 1; done image = imageutils.GetE2EImage(imageutils.AuthenticatedWindowsNanoServer) isWindows = true } - imagePullTest(image, true, v1.PodRunning, false, isWindows) + imagePullTest(ctx, image, true, v1.PodRunning, false, isWindows) }) }) }) diff --git a/test/e2e/common/node/runtimeclass.go b/test/e2e/common/node/runtimeclass.go index 41e0010eed0..673b73d5130 100644 --- a/test/e2e/common/node/runtimeclass.go +++ b/test/e2e/common/node/runtimeclass.go @@ -54,15 +54,15 @@ var _ = SIGDescribe("RuntimeClass", func() { */ framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func(ctx context.Context) { rcName := f.Namespace.Name + "-nonexistent" - expectPodRejection(f, e2enode.NewRuntimeClassPod(rcName)) + expectPodRejection(ctx, f, e2enode.NewRuntimeClassPod(rcName)) }) // The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler not being installed. ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) { handler := f.Namespace.Name + "-handler" - rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil) + rcName := createRuntimeClass(ctx, f, "unconfigured-handler", handler, nil) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) - pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) + pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName)) eventSelector := fields.Set{ "involvedObject.kind": "Pod", "involvedObject.name": pod.Name, @@ -70,12 +70,12 @@ var _ = SIGDescribe("RuntimeClass", func() { "reason": events.FailedCreatePodSandBox, }.AsSelector().String() // Events are unreliable, don't depend on the event. It's used only to speed up the test. - err := e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, handler, framework.PodEventTimeout) + err := e2eevents.WaitTimeoutForEvent(ctx, f.ClientSet, f.Namespace.Name, eventSelector, handler, framework.PodEventTimeout) if err != nil { framework.Logf("Warning: did not get event about FailedCreatePodSandBox. 
Err: %v", err) } // Check the pod is still not running - p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)") framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending") }) @@ -87,10 +87,10 @@ var _ = SIGDescribe("RuntimeClass", func() { // see https://github.com/kubernetes/kubernetes/blob/eb729620c522753bc7ae61fc2c7b7ea19d4aad2f/cluster/gce/gci/configure-helper.sh#L3069-L3076 e2eskipper.SkipUnlessProviderIs("gce") - rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) + rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) - pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) - expectPodSuccess(f, pod) + pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName)) + expectPodSuccess(ctx, f, pod) }) /* @@ -102,12 +102,12 @@ var _ = SIGDescribe("RuntimeClass", func() { is not being tested here. */ framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) { - rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) + rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) - pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) + pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName)) // there is only one pod in the namespace label := labels.SelectorFromSet(labels.Set(map[string]string{})) - pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label) + pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, label) framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass") framework.ExpectEqual(len(pods.Items), 1) @@ -127,17 +127,17 @@ var _ = SIGDescribe("RuntimeClass", func() { is not being tested here. 
*/ framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func(ctx context.Context) { - rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{ + rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{ PodFixed: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("10m"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("1Mi"), }, }) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) - pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) + pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName)) // there is only one pod in the namespace label := labels.SelectorFromSet(labels.Set(map[string]string{})) - pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label) + pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, label) framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass") framework.ExpectEqual(len(pods.Items), 1) @@ -154,16 +154,16 @@ var _ = SIGDescribe("RuntimeClass", func() { Description: Pod requesting the deleted RuntimeClass must be rejected. */ framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func(ctx context.Context) { - rcName := createRuntimeClass(f, "delete-me", "runc", nil) + rcName := createRuntimeClass(ctx, f, "delete-me", "runc", nil) rcClient := f.ClientSet.NodeV1().RuntimeClasses() ginkgo.By("Deleting RuntimeClass "+rcName, func() { - err := rcClient.Delete(context.TODO(), rcName, metav1.DeleteOptions{}) + err := rcClient.Delete(ctx, rcName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName) ginkgo.By("Waiting for the RuntimeClass to disappear") framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) { - _, err := rcClient.Get(context.TODO(), rcName, metav1.GetOptions{}) + _, err := rcClient.Get(ctx, rcName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil // done } @@ -174,7 +174,7 @@ var _ = SIGDescribe("RuntimeClass", func() { })) }) - expectPodRejection(f, e2enode.NewRuntimeClassPod(rcName)) + expectPodRejection(ctx, f, e2enode.NewRuntimeClassPod(rcName)) }) /* @@ -227,7 +227,7 @@ var _ = SIGDescribe("RuntimeClass", func() { ginkgo.By("getting /apis/node.k8s.io") { group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/node.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/node.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err) found := false for _, version := range group.Versions { @@ -260,43 +260,43 @@ var _ = SIGDescribe("RuntimeClass", func() { // Main resource create/read/update/watch operations ginkgo.By("creating") - createdRC, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) + createdRC, err := rcClient.Create(ctx, rc, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) + _, err = rcClient.Create(ctx, rc, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { framework.Failf("expected 409, got %#v", err) } - _, err = rcClient.Create(context.TODO(), rc2, metav1.CreateOptions{}) + _, err = rcClient.Create(ctx, rc2, metav1.CreateOptions{}) framework.ExpectNoError(err) 
ginkgo.By("watching") framework.Logf("starting watch") - rcWatch, err := rcClient.Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + rcWatch, err := rcClient.Watch(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) // added for a watch - _, err = rcClient.Create(context.TODO(), rc3, metav1.CreateOptions{}) + _, err = rcClient.Create(ctx, rc3, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("getting") - gottenRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) + gottenRC, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(gottenRC.UID, createdRC.UID) ginkgo.By("listing") - rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + rcs, err := rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(rcs.Items), 3, "filtered list should have 3 items") ginkgo.By("patching") - patchedRC, err := rcClient.Patch(context.TODO(), createdRC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) + patchedRC, err := rcClient.Patch(ctx, createdRC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedRC.Annotations["patched"], "true", "patched object should have the applied annotation") ginkgo.By("updating") csrToUpdate := patchedRC.DeepCopy() csrToUpdate.Annotations["updated"] = "true" - updatedRC, err := rcClient.Update(context.TODO(), csrToUpdate, metav1.UpdateOptions{}) + updatedRC, err := rcClient.Update(ctx, csrToUpdate, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(updatedRC.Annotations["updated"], "true", "updated object should have the applied annotation") @@ -338,43 +338,43 @@ var _ = SIGDescribe("RuntimeClass", func() { // main resource delete operations ginkgo.By("deleting") - err = rcClient.Delete(context.TODO(), createdRC.Name, metav1.DeleteOptions{}) + err = rcClient.Delete(ctx, createdRC.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - _, err = rcClient.Get(context.TODO(), createdRC.Name, metav1.GetOptions{}) + _, err = rcClient.Get(ctx, createdRC.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.Failf("expected 404, got %#v", err) } - rcs, err = rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + rcs, err = rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(rcs.Items), 2, "filtered list should have 2 items") ginkgo.By("deleting a collection") - err = rcClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + err = rcClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) - rcs, err = rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + rcs, err = rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(rcs.Items), 0, "filtered list should have 0 items") }) }) -func deleteRuntimeClass(f *framework.Framework, name string) { - err := f.ClientSet.NodeV1().RuntimeClasses().Delete(context.TODO(), name, 
metav1.DeleteOptions{}) +func deleteRuntimeClass(ctx context.Context, f *framework.Framework, name string) { + err := f.ClientSet.NodeV1().RuntimeClasses().Delete(ctx, name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete RuntimeClass resource") } // createRuntimeClass generates a RuntimeClass with the desired handler and a "namespaced" name, // synchronously creates it, and returns the generated name. -func createRuntimeClass(f *framework.Framework, name, handler string, overhead *nodev1.Overhead) string { +func createRuntimeClass(ctx context.Context, f *framework.Framework, name, handler string, overhead *nodev1.Overhead) string { uniqueName := fmt.Sprintf("%s-%s", f.Namespace.Name, name) rc := runtimeclasstest.NewRuntimeClass(uniqueName, handler) rc.Overhead = overhead - rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") return rc.GetName() } -func expectPodRejection(f *framework.Framework, pod *v1.Pod) { - _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) +func expectPodRejection(ctx context.Context, f *framework.Framework, pod *v1.Pod) { + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectError(err, "should be forbidden") if !apierrors.IsForbidden(err) { framework.Failf("expected forbidden error, got %#v", err) @@ -382,7 +382,7 @@ func expectPodRejection(f *framework.Framework, pod *v1.Pod) { } // expectPodSuccess waits for the given pod to terminate successfully. -func expectPodSuccess(f *framework.Framework, pod *v1.Pod) { +func expectPodSuccess(ctx context.Context, f *framework.Framework, pod *v1.Pod) { framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace( - f.ClientSet, pod.Name, f.Namespace.Name)) + ctx, f.ClientSet, pod.Name, f.Namespace.Name)) } diff --git a/test/e2e/common/node/secrets.go b/test/e2e/common/node/secrets.go index 8305aedc1b3..fdcaf5c38a5 100644 --- a/test/e2e/common/node/secrets.go +++ b/test/e2e/common/node/secrets.go @@ -49,7 +49,7 @@ var _ = SIGDescribe("Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -82,7 +82,7 @@ var _ = SIGDescribe("Secrets", func() { }, } - e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "consume secrets", pod, 0, []string{ "SECRET_DATA=value-1", }) }) @@ -97,7 +97,7 @@ var _ = SIGDescribe("Secrets", func() { secret := secretForTest(f.Namespace.Name, name) ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -126,7 +126,7 @@ var _ = SIGDescribe("Secrets", func() { }, 
} - e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "consume secrets", pod, 0, []string{ "data-1=value-1", "data-2=value-2", "data-3=value-3", "p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3", }) @@ -138,7 +138,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Attempt to create a Secret with an empty key. The creation MUST fail. */ framework.ConformanceIt("should fail to create secret due to empty secret key", func(ctx context.Context) { - secret, err := createEmptyKeySecretForTest(f) + secret, err := createEmptyKeySecretForTest(ctx, f) framework.ExpectError(err, "created secret %q with empty key in namespace %q", secret.Name, f.Namespace.Name) }) @@ -157,7 +157,7 @@ var _ = SIGDescribe("Secrets", func() { secretTestName := "test-secret-" + string(uuid.NewUUID()) // create a secret in the test namespace - _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), &v1.Secret{ + _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretTestName, Labels: map[string]string{ @@ -173,7 +173,7 @@ var _ = SIGDescribe("Secrets", func() { ginkgo.By("listing secrets in all namespaces to ensure that there are more than zero") // list all secrets in all namespaces to ensure endpoint coverage - secretsList, err := f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{ + secretsList, err := f.ClientSet.CoreV1().Secrets("").List(ctx, metav1.ListOptions{ LabelSelector: "testsecret-constant=true", }) framework.ExpectNoError(err, "failed to list secrets") @@ -202,10 +202,10 @@ var _ = SIGDescribe("Secrets", func() { "data": map[string][]byte{"key": []byte(secretPatchNewData)}, }) framework.ExpectNoError(err, "failed to marshal JSON") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(context.TODO(), secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch), metav1.PatchOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(ctx, secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch secret") - secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretCreatedName, metav1.GetOptions{}) + secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, secretCreatedName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get secret") secretDecodedstring, err := base64.StdEncoding.DecodeString(string(secret.Data["key"])) @@ -214,14 +214,14 @@ var _ = SIGDescribe("Secrets", func() { framework.ExpectEqual(string(secretDecodedstring), "value1", "found secret, but the data wasn't updated from the patch") ginkgo.By("deleting the secret using a LabelSelector") - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: "testsecret=true", }) framework.ExpectNoError(err, "failed to delete patched secret") ginkgo.By("listing secrets in all namespaces, searching for label name and value in patch") // list all secrets in all namespaces - secretsList, err = f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{ + secretsList, err = f.ClientSet.CoreV1().Secrets("").List(ctx, metav1.ListOptions{ LabelSelector: "testsecret-constant=true", }) 
framework.ExpectNoError(err, "failed to list secrets") @@ -253,7 +253,7 @@ func secretForTest(namespace, name string) *v1.Secret { } } -func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) { +func createEmptyKeySecretForTest(ctx context.Context, f *framework.Framework) (*v1.Secret, error) { secretName := "secret-emptykey-test-" + string(uuid.NewUUID()) secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -265,5 +265,5 @@ func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) { }, } ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) - return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}) } diff --git a/test/e2e/common/node/security_context.go b/test/e2e/common/node/security_context.go index 30db61e1f1a..4997a96b534 100644 --- a/test/e2e/common/node/security_context.go +++ b/test/e2e/common/node/security_context.go @@ -76,23 +76,23 @@ var _ = SIGDescribe("Security Context", func() { // with hostUsers=false the pod must use a new user namespace podClient := e2epod.PodClientNS(f, f.Namespace.Name) - createdPod1 := podClient.Create(makePod(false)) - createdPod2 := podClient.Create(makePod(false)) + createdPod1 := podClient.Create(ctx, makePod(false)) + createdPod2 := podClient.Create(ctx, makePod(false)) ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By("delete the pods") - podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) - podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + podClient.DeleteSync(ctx, createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + podClient.DeleteSync(ctx, createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) }) getLogs := func(pod *v1.Pod) (string, error) { - err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart) + err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart) if err != nil { return "", err } - podStatus, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + podStatus, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return "", err } - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podStatus.Name, containerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podStatus.Name, containerName) } logs1, err := getLogs(createdPod1) @@ -116,7 +116,7 @@ var _ = SIGDescribe("Security Context", func() { // When running in the host's user namespace, the /proc/self/uid_map file content looks like: // 0 0 4294967295 // Verify the value 4294967295 is present in the output. 
- e2epodoutput.TestContainerOutput(f, "read namespace", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "read namespace", pod, 0, []string{ "4294967295", }) }) @@ -129,14 +129,14 @@ var _ = SIGDescribe("Security Context", func() { configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } // Create secret. secret := secretForTest(f.Namespace.Name, name) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -240,7 +240,7 @@ var _ = SIGDescribe("Security Context", func() { // Each line should be "=0" that means root inside the container is the owner of the file. downwardAPIVolFiles := 1 projectedFiles := len(secret.Data) + downwardAPIVolFiles - e2epodoutput.TestContainerOutput(f, "check file permissions", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "check file permissions", pod, 0, []string{ strings.Repeat("=0\n", len(secret.Data)+len(configMap.Data)+downwardAPIVolFiles+projectedFiles), }) }) @@ -251,7 +251,7 @@ var _ = SIGDescribe("Security Context", func() { configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -300,7 +300,7 @@ var _ = SIGDescribe("Security Context", func() { // Expect one line for each file on all the volumes. // Each line should be "=200" (fsGroup) that means it was mapped to the // right user inside the container. - e2epodoutput.TestContainerOutput(f, "check FSGroup is mapped correctly", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "check FSGroup is mapped correctly", pod, 0, []string{ strings.Repeat(fmt.Sprintf("=%v\n", fsGroup), len(configMap.Data)), }) }) @@ -327,15 +327,15 @@ var _ = SIGDescribe("Security Context", func() { }, } } - createAndWaitUserPod := func(userid int64) { + createAndWaitUserPod := func(ctx context.Context, userid int64) { podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID()) - podClient.Create(makeUserPod(podName, + podClient.Create(ctx, makeUserPod(podName, framework.BusyBoxImage, []string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)}, userid, )) - podClient.WaitForSuccess(podName, framework.PodStartTimeout) + podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout) } /* @@ -345,7 +345,7 @@ var _ = SIGDescribe("Security Context", func() { [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. 
*/ framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - createAndWaitUserPod(65534) + createAndWaitUserPod(ctx, 65534) }) /* @@ -356,7 +356,7 @@ var _ = SIGDescribe("Security Context", func() { [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. */ ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - createAndWaitUserPod(0) + createAndWaitUserPod(ctx, 0) }) }) @@ -390,19 +390,19 @@ var _ = SIGDescribe("Security Context", func() { e2eskipper.SkipIfNodeOSDistroIs("windows") name := "explicit-nonroot-uid" pod := makeNonRootPod(name, rootImage, pointer.Int64Ptr(nonRootTestUserID)) - podClient.Create(pod) + podClient.Create(ctx, pod) - podClient.WaitForSuccess(name, framework.PodStartTimeout) - framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1000")) + podClient.WaitForSuccess(ctx, name, framework.PodStartTimeout) + framework.ExpectNoError(podClient.MatchContainerOutput(ctx, name, name, "1000")) }) ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func(ctx context.Context) { // creates a pod with RunAsUser, which is not supported on Windows. e2eskipper.SkipIfNodeOSDistroIs("windows") name := "explicit-root-uid" pod := makeNonRootPod(name, nonRootImage, pointer.Int64Ptr(0)) - pod = podClient.Create(pod) + pod = podClient.Create(ctx, pod) - ev, err := podClient.WaitForErrorEventOrSuccess(pod) + ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod) framework.ExpectNoError(err) gomega.Expect(ev).NotTo(gomega.BeNil()) framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer) @@ -410,17 +410,17 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("should run with an image specified user ID", func(ctx context.Context) { name := "implicit-nonroot-uid" pod := makeNonRootPod(name, nonRootImage, nil) - podClient.Create(pod) + podClient.Create(ctx, pod) - podClient.WaitForSuccess(name, framework.PodStartTimeout) - framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234")) + podClient.WaitForSuccess(ctx, name, framework.PodStartTimeout) + framework.ExpectNoError(podClient.MatchContainerOutput(ctx, name, name, "1234")) }) ginkgo.It("should not run without a specified user ID", func(ctx context.Context) { name := "implicit-root-uid" pod := makeNonRootPod(name, rootImage, nil) - pod = podClient.Create(pod) + pod = podClient.Create(ctx, pod) - ev, err := podClient.WaitForErrorEventOrSuccess(pod) + ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod) framework.ExpectNoError(err) gomega.Expect(ev).NotTo(gomega.BeNil()) framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer) @@ -448,18 +448,18 @@ var _ = SIGDescribe("Security Context", func() { }, } } - createAndWaitUserPod := func(readOnlyRootFilesystem bool) string { + createAndWaitUserPod := func(ctx context.Context, readOnlyRootFilesystem bool) string { podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID()) - podClient.Create(makeUserPod(podName, + podClient.Create(ctx, makeUserPod(podName, framework.BusyBoxImage, []string{"sh", "-c", "touch checkfile"}, readOnlyRootFilesystem, )) if readOnlyRootFilesystem { - waitForFailure(f, podName, framework.PodStartTimeout) + waitForFailure(ctx, f, podName, framework.PodStartTimeout) } else { - podClient.WaitForSuccess(podName, framework.PodStartTimeout) + podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout) } 
return podName @@ -474,7 +474,7 @@ var _ = SIGDescribe("Security Context", func() { [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support creating containers with read-only access. */ ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - createAndWaitUserPod(true) + createAndWaitUserPod(ctx, true) }) /* @@ -484,7 +484,7 @@ var _ = SIGDescribe("Security Context", func() { Write operation MUST be allowed and Pod MUST be in Succeeded state. */ framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func(ctx context.Context) { - createAndWaitUserPod(false) + createAndWaitUserPod(ctx, false) }) }) @@ -509,14 +509,14 @@ var _ = SIGDescribe("Security Context", func() { }, } } - createAndWaitUserPod := func(privileged bool) string { + createAndWaitUserPod := func(ctx context.Context, privileged bool) string { podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID()) - podClient.Create(makeUserPod(podName, + podClient.Create(ctx, makeUserPod(podName, framework.BusyBoxImage, []string{"sh", "-c", "ip link add dummy0 type dummy || true"}, privileged, )) - podClient.WaitForSuccess(podName, framework.PodStartTimeout) + podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout) return podName } /* @@ -526,8 +526,8 @@ var _ = SIGDescribe("Security Context", func() { [LinuxOnly]: This test is marked as LinuxOnly since it runs a Linux-specific command. */ framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - podName := createAndWaitUserPod(false) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) + podName := createAndWaitUserPod(ctx, false) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) } @@ -539,8 +539,8 @@ var _ = SIGDescribe("Security Context", func() { }) ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func(ctx context.Context) { - podName := createAndWaitUserPod(true) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) + podName := createAndWaitUserPod(ctx, true) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) } @@ -573,13 +573,13 @@ var _ = SIGDescribe("Security Context", func() { }, } } - createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error { - podClient.Create(makeAllowPrivilegeEscalationPod(podName, + createAndMatchOutput := func(ctx context.Context, podName, output string, allowPrivilegeEscalation *bool, uid int64) error { + podClient.Create(ctx, makeAllowPrivilegeEscalationPod(podName, allowPrivilegeEscalation, uid, )) - podClient.WaitForSuccess(podName, framework.PodStartTimeout) - return podClient.MatchContainerOutput(podName, podName, output) + podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout) + return podClient.MatchContainerOutput(ctx, podName, podName, output) } /* @@ -593,7 +593,7 @@ var _ = SIGDescribe("Security Context", func() { */ ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { 
podName := "alpine-nnp-nil-" + string(uuid.NewUUID()) - if err := createAndMatchOutput(podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil { + if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil { framework.Failf("Match output for pod %q failed: %v", podName, err) } }) @@ -609,7 +609,7 @@ var _ = SIGDescribe("Security Context", func() { framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "alpine-nnp-false-" + string(uuid.NewUUID()) apeFalse := false - if err := createAndMatchOutput(podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil { + if err := createAndMatchOutput(ctx, podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil { framework.Failf("Match output for pod %q failed: %v", podName, err) } }) @@ -626,7 +626,7 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "alpine-nnp-true-" + string(uuid.NewUUID()) apeTrue := true - if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil { + if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil { framework.Failf("Match output for pod %q failed: %v", podName, err) } }) @@ -634,8 +634,8 @@ var _ = SIGDescribe("Security Context", func() { }) // waitForFailure waits for pod to fail. -func waitForFailure(f *framework.Framework, name string, timeout time.Duration) { - gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, +func waitForFailure(ctx context.Context, f *framework.Framework, name string, timeout time.Duration) { + gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed: diff --git a/test/e2e/common/node/sysctl.go b/test/e2e/common/node/sysctl.go index 3cad7c6d76b..fbf4bed0f71 100644 --- a/test/e2e/common/node/sysctl.go +++ b/test/e2e/common/node/sysctl.go @@ -87,27 +87,27 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"} ginkgo.By("Creating a pod with the kernel.shm_rmid_forced sysctl") - pod = podClient.Create(pod) + pod = podClient.Create(ctx, pod) ginkgo.By("Watching for error events or started pod") // watch for events instead of termination of pod because the kubelet deletes // failed pods without running containers. This would create a race as the pod // might have already been deleted here. 
- ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod) + ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, pod) framework.ExpectNoError(err) gomega.Expect(ev).To(gomega.BeNil()) ginkgo.By("Waiting for pod completion") - err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) - pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Checking that the pod succeeded") framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded) ginkgo.By("Getting logs from the pod") - log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) framework.ExpectNoError(err) ginkgo.By("Checking that the sysctl is actually updated") @@ -146,7 +146,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { ginkgo.By("Creating a pod with one valid and two invalid sysctls") client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - _, err := client.Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := client.Create(ctx, pod, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.BeNil()) gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`)) @@ -168,11 +168,11 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { } ginkgo.By("Creating a pod with an ignorelisted, but not allowlisted sysctl on the node") - pod = podClient.Create(pod) + pod = podClient.Create(ctx, pod) ginkgo.By("Wait for pod failed reason") // watch for pod failed reason instead of termination of pod - err := e2epod.WaitForPodFailedReason(f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart) + err := e2epod.WaitForPodFailedReason(ctx, f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart) framework.ExpectNoError(err) }) @@ -195,27 +195,27 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel/shm_rmid_forced"} ginkgo.By("Creating a pod with the kernel/shm_rmid_forced sysctl") - pod = podClient.Create(pod) + pod = podClient.Create(ctx, pod) ginkgo.By("Watching for error events or started pod") // watch for events instead of termination of pod because the kubelet deletes // failed pods without running containers. This would create a race as the pod // might have already been deleted here. 
- ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod) + ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, pod) framework.ExpectNoError(err) gomega.Expect(ev).To(gomega.BeNil()) ginkgo.By("Waiting for pod completion") - err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) - pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Checking that the pod succeeded") framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded) ginkgo.By("Getting logs from the pod") - log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) framework.ExpectNoError(err) ginkgo.By("Checking that the sysctl is actually updated") diff --git a/test/e2e/common/storage/configmap_volume.go b/test/e2e/common/storage/configmap_volume.go index 610ba3f87dc..20fb5057ddc 100644 --- a/test/e2e/common/storage/configmap_volume.go +++ b/test/e2e/common/storage/configmap_volume.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644. */ framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { - doConfigMapE2EWithoutMappings(f, false, 0, nil) + doConfigMapE2EWithoutMappings(ctx, f, false, 0, nil) }) /* @@ -56,14 +56,14 @@ var _ = SIGDescribe("ConfigMap", func() { */ framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0400) - doConfigMapE2EWithoutMappings(f, false, 0, &defaultMode) + doConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode) }) ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ - doConfigMapE2EWithoutMappings(f, true, 1001, &defaultMode) + doConfigMapE2EWithoutMappings(ctx, f, true, 1001, &defaultMode) }) /* @@ -72,13 +72,13 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644. 
*/ framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) { - doConfigMapE2EWithoutMappings(f, true, 0, nil) + doConfigMapE2EWithoutMappings(ctx, f, true, 0, nil) }) ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") - doConfigMapE2EWithoutMappings(f, true, 1001, nil) + doConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil) }) /* @@ -87,7 +87,7 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644. */ framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { - doConfigMapE2EWithMappings(f, false, 0, nil) + doConfigMapE2EWithMappings(ctx, f, false, 0, nil) }) /* @@ -98,7 +98,7 @@ var _ = SIGDescribe("ConfigMap", func() { */ framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { mode := int32(0400) - doConfigMapE2EWithMappings(f, false, 0, &mode) + doConfigMapE2EWithMappings(ctx, f, false, 0, &mode) }) /* @@ -107,13 +107,13 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644. */ framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) { - doConfigMapE2EWithMappings(f, true, 0, nil) + doConfigMapE2EWithMappings(ctx, f, true, 0, nil) }) ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") - doConfigMapE2EWithMappings(f, true, 1001, nil) + doConfigMapE2EWithMappings(ctx, f, true, 1001, nil) }) /* @@ -122,7 +122,7 @@ var _ = SIGDescribe("ConfigMap", func() { Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. 
*/ framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) name := "configmap-test-upd-" + string(uuid.NewUUID()) @@ -141,7 +141,7 @@ var _ = SIGDescribe("ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -149,22 +149,22 @@ var _ = SIGDescribe("ConfigMap", func() { "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1") ginkgo.By("Creating the pod") - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) pollLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) } - gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) ginkgo.By("waiting to observe update in volume") - gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2")) + gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2")) }) /* @@ -173,7 +173,7 @@ var _ = SIGDescribe("ConfigMap", func() { Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod. 
*/ framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func(ctx context.Context) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) name := "configmap-test-upd-" + string(uuid.NewUUID()) @@ -196,7 +196,7 @@ var _ = SIGDescribe("ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -216,20 +216,20 @@ var _ = SIGDescribe("ConfigMap", func() { }) ginkgo.By("Creating the pod") - e2epod.NewPodClient(f).Create(pod) - e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) pollLogs1 := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) } pollLogs2 := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[1].Name) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[1].Name) } ginkgo.By("Waiting for pod with text data") - gomega.Eventually(pollLogs1, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollLogs1, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By("Waiting for pod with binary data") - gomega.Eventually(pollLogs2, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("de ca fe ba d0 fe ff")) + gomega.Eventually(ctx, pollLogs2, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("de ca fe ba d0 fe ff")) }) /* @@ -238,7 +238,7 @@ var _ = SIGDescribe("ConfigMap", func() { Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in a error reading that item(file). 
*/ framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true volumeMountPath := "/etc/configmap-volumes" @@ -284,12 +284,12 @@ var _ = SIGDescribe("ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error - if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil { + if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, deleteConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) - if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil { + if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, updateConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } @@ -375,44 +375,44 @@ var _ = SIGDescribe("ConfigMap", func() { }, } ginkgo.By("Creating the pod") - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) pollCreateLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) } - gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1")) + gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1")) pollUpdateLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) } - gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3")) + gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3")) pollDeleteLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) } - gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, deleteConfigMap.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", 
deleteConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) updateConfigMap.ResourceVersion = "" // to force update delete(updateConfigMap.Data, "data-1") updateConfigMap.Data["data-3"] = "value-3" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, updateConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) - if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil { + if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, createConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } ginkgo.By("waiting to observe update in volume") - gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) - gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) - gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1")) + gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) + gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1")) }) /* @@ -432,7 +432,7 @@ var _ = SIGDescribe("ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -486,7 +486,7 @@ var _ = SIGDescribe("ConfigMap", func() { }, } - e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{ "content of file \"/etc/configmap-volume/data-1\": value-1", }) @@ -505,28 +505,28 @@ var _ = SIGDescribe("ConfigMap", func() { name := "immutable" configMap := newConfigMap(f, name) - currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) + currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create config map %q in namespace %q", configMap.Name, configMap.Namespace) currentConfigMap.Data["data-4"] = "value-4" - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update 
config map %q in namespace %q", configMap.Name, configMap.Namespace) // Mark config map as immutable. trueVal := true currentConfigMap.Immutable = &trueVal - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to mark config map %q in namespace %q as immutable", configMap.Name, configMap.Namespace) // Ensure data can't be changed now. currentConfigMap.Data["data-5"] = "value-5" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{}) if !apierrors.IsInvalid(err) { framework.Failf("expected 'invalid' as error, got instead: %v", err) } // Ensure config map can't be switched from immutable to mutable. - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace) if !*currentConfigMap.Immutable { framework.Failf("currentConfigMap %s can be switched from immutable to mutable", currentConfigMap.Name) @@ -534,20 +534,20 @@ var _ = SIGDescribe("ConfigMap", func() { falseVal := false currentConfigMap.Immutable = &falseVal - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{}) if !apierrors.IsInvalid(err) { framework.Failf("expected 'invalid' as error, got instead: %v", err) } // Ensure that metadata can be changed. - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace) currentConfigMap.Labels = map[string]string{"label1": "value1"} - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace) // Ensure that immutable config map can be deleted. 
- err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete config map %q in namespace %q", configMap.Name, configMap.Namespace) }) @@ -556,7 +556,7 @@ var _ = SIGDescribe("ConfigMap", func() { // Slow (~5 mins) ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" - pod, err := createNonOptionalConfigMapPod(f, volumeMountPath) + pod, err := createNonOptionalConfigMapPod(ctx, f, volumeMountPath) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) }) @@ -565,7 +565,7 @@ var _ = SIGDescribe("ConfigMap", func() { // Slow (~5 mins) ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" - pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath) + pod, err := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) }) }) @@ -584,7 +584,7 @@ func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap { } } -func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) { +func doConfigMapE2EWithoutMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) { groupID := int64(fsGroup) var ( @@ -596,7 +596,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -622,10 +622,10 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup "content of file \"/etc/configmap-volume/data-1\": value-1", fileModeRegexp, } - e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output) } -func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) { +func doConfigMapE2EWithMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) { groupID := int64(fsGroup) var ( @@ -638,7 +638,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -674,11 +674,11 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int 
fileModeRegexp := getFileModeRegex("/etc/configmap-volume/path/to/data-2", itemMode) output = append(output, fileModeRegexp) } - e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output) } -func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) +func createNonOptionalConfigMapPod(ctx context.Context, f *framework.Framework, volumeMountPath string) (*v1.Pod, error) { + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) falseValue := false @@ -691,12 +691,12 @@ func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath strin pod.Spec.Volumes[0].VolumeSource.ConfigMap.Optional = &falseValue ginkgo.By("Creating the pod") - pod = e2epod.NewPodClient(f).Create(pod) - return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + pod = e2epod.NewPodClient(f).Create(ctx, pod) + return pod, e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) } -func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) +func createNonOptionalConfigMapPodWithConfig(ctx context.Context, f *framework.Framework, volumeMountPath string) (*v1.Pod, error) { + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) falseValue := false @@ -706,7 +706,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } // creating a pod with configMap object, but with different key which is not present in configMap object. 
@@ -721,8 +721,8 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount } ginkgo.By("Creating the pod") - pod = e2epod.NewPodClient(f).Create(pod) - return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + pod = e2epod.NewPodClient(f).Create(ctx, pod) + return pod, e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) } func createConfigMapVolumeMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod { diff --git a/test/e2e/common/storage/downwardapi.go b/test/e2e/common/storage/downwardapi.go index cbe295259bc..0f1616f7eaa 100644 --- a/test/e2e/common/storage/downwardapi.go +++ b/test/e2e/common/storage/downwardapi.go @@ -62,7 +62,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag fmt.Sprintf("EPHEMERAL_STORAGE_REQUEST=%d", 32*1024*1024), } - testDownwardAPIForEphemeralStorage(f, podName, env, expectations) + testDownwardAPIForEphemeralStorage(ctx, f, podName, env, expectations) }) ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func(ctx context.Context) { @@ -98,13 +98,13 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag }, } - testDownwardAPIUsingPod(f, pod, env, expectations) + testDownwardAPIUsingPod(ctx, f, pod, env, expectations) }) }) }) -func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) { +func testDownwardAPIForEphemeralStorage(ctx context.Context, f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -131,9 +131,9 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string, }, } - testDownwardAPIUsingPod(f, pod, env, expectations) + testDownwardAPIUsingPod(ctx, f, pod, env, expectations) } -func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) { - e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations) +func testDownwardAPIUsingPod(ctx context.Context, f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) { + e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward api env vars", pod, 0, expectations) } diff --git a/test/e2e/common/storage/downwardapi_volume.go b/test/e2e/common/storage/downwardapi_volume.go index 24c818f5356..6537f6e1f35 100644 --- a/test/e2e/common/storage/downwardapi_volume.go +++ b/test/e2e/common/storage/downwardapi_volume.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("Downward API volume", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("%s\n", podName), }) }) @@ -71,7 +71,7 @@ var _ = SIGDescribe("Downward API volume", func() { defaultMode := int32(0400) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ "mode of file \"/etc/podinfo/podname\": -r--------", }) }) @@ -87,7 +87,7 @@ var _ = SIGDescribe("Downward API volume", func() { mode := 
int32(0400) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ "mode of file \"/etc/podinfo/podname\": -r--------", }) }) @@ -102,7 +102,7 @@ var _ = SIGDescribe("Downward API volume", func() { FSGroup: &gid, } setPodNonRootUser(pod) - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("%s\n", podName), }) }) @@ -118,7 +118,7 @@ var _ = SIGDescribe("Downward API volume", func() { FSGroup: &gid, } setPodNonRootUser(pod) - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ "mode of file \"/etc/podinfo/podname\": -r--r-----", }) }) @@ -137,20 +137,20 @@ var _ = SIGDescribe("Downward API volume", func() { pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels") containerName := "client-container" ginkgo.By("Creating the pod") - podClient.CreateSync(pod) + podClient.CreateSync(ctx, pod) - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n")) //modify labels - podClient.Update(podName, func(pod *v1.Pod) { + podClient.Update(ctx, podName, func(pod *v1.Pod) { pod.Labels["key3"] = "value3" }) - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n")) }) @@ -168,20 +168,20 @@ var _ = SIGDescribe("Downward API volume", func() { containerName := "client-container" ginkgo.By("Creating the pod") - pod = podClient.CreateSync(pod) + pod = podClient.CreateSync(ctx, pod) - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n")) //modify annotations - podClient.Update(podName, func(pod *v1.Pod) { + podClient.Update(ctx, podName, func(pod *v1.Pod) { pod.Annotations["builder"] = "foo" }) - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n")) }) @@ -195,7 +195,7 @@ var _ = SIGDescribe("Downward API volume", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + 
e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("2\n"), }) }) @@ -209,7 +209,7 @@ var _ = SIGDescribe("Downward API volume", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("67108864\n"), }) }) @@ -223,7 +223,7 @@ var _ = SIGDescribe("Downward API volume", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("1\n"), }) }) @@ -237,7 +237,7 @@ var _ = SIGDescribe("Downward API volume", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("33554432\n"), }) }) @@ -251,7 +251,7 @@ var _ = SIGDescribe("Downward API volume", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") - e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) }) /* @@ -263,7 +263,7 @@ var _ = SIGDescribe("Downward API volume", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") - e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) }) }) diff --git a/test/e2e/common/storage/empty_dir.go b/test/e2e/common/storage/empty_dir.go index 9224fd5bb3b..0989f226380 100644 --- a/test/e2e/common/storage/empty_dir.go +++ b/test/e2e/common/storage/empty_dir.go @@ -54,27 +54,27 @@ var _ = SIGDescribe("EmptyDir volumes", func() { }) ginkgo.It("new files should be created with FSGroup ownership when container is root", func(ctx context.Context) { - doTestSetgidFSGroup(f, 0, v1.StorageMediumMemory) + doTestSetgidFSGroup(ctx, f, 0, v1.StorageMediumMemory) }) ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func(ctx context.Context) { - doTestSetgidFSGroup(f, nonRootUID, v1.StorageMediumMemory) + doTestSetgidFSGroup(ctx, f, nonRootUID, v1.StorageMediumMemory) }) ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func(ctx context.Context) { - doTestSubPathFSGroup(f, nonRootUID, v1.StorageMediumMemory) + doTestSubPathFSGroup(ctx, f, nonRootUID, v1.StorageMediumMemory) }) ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func(ctx context.Context) { - doTest0644FSGroup(f, 0, v1.StorageMediumMemory) + doTest0644FSGroup(ctx, f, 0, v1.StorageMediumMemory) }) ginkgo.It("volume on default medium should have the correct mode using FSGroup", 
func(ctx context.Context) { - doTestVolumeModeFSGroup(f, 0, v1.StorageMediumDefault) + doTestVolumeModeFSGroup(ctx, f, 0, v1.StorageMediumDefault) }) ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func(ctx context.Context) { - doTestVolumeModeFSGroup(f, 0, v1.StorageMediumMemory) + doTestVolumeModeFSGroup(ctx, f, 0, v1.StorageMediumMemory) }) }) @@ -85,7 +85,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or the medium = 'Memory'. */ framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTestVolumeMode(f, 0, v1.StorageMediumMemory) + doTestVolumeMode(ctx, f, 0, v1.StorageMediumMemory) }) /* @@ -95,7 +95,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0644(f, 0, v1.StorageMediumMemory) + doTest0644(ctx, f, 0, v1.StorageMediumMemory) }) /* @@ -105,7 +105,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0666(f, 0, v1.StorageMediumMemory) + doTest0666(ctx, f, 0, v1.StorageMediumMemory) }) /* @@ -115,7 +115,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0777(f, 0, v1.StorageMediumMemory) + doTest0777(ctx, f, 0, v1.StorageMediumMemory) }) /* @@ -125,7 +125,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0644(f, nonRootUID, v1.StorageMediumMemory) + doTest0644(ctx, f, nonRootUID, v1.StorageMediumMemory) }) /* @@ -135,7 +135,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0666(f, nonRootUID, v1.StorageMediumMemory) + doTest0666(ctx, f, nonRootUID, v1.StorageMediumMemory) }) /* @@ -145,7 +145,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. 
*/ framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0777(f, nonRootUID, v1.StorageMediumMemory) + doTest0777(ctx, f, nonRootUID, v1.StorageMediumMemory) }) /* @@ -155,7 +155,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTestVolumeMode(f, 0, v1.StorageMediumDefault) + doTestVolumeMode(ctx, f, 0, v1.StorageMediumDefault) }) /* @@ -165,7 +165,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0644(f, 0, v1.StorageMediumDefault) + doTest0644(ctx, f, 0, v1.StorageMediumDefault) }) /* @@ -175,7 +175,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0666(f, 0, v1.StorageMediumDefault) + doTest0666(ctx, f, 0, v1.StorageMediumDefault) }) /* @@ -185,7 +185,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0777(f, 0, v1.StorageMediumDefault) + doTest0777(ctx, f, 0, v1.StorageMediumDefault) }) /* @@ -195,7 +195,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0644(f, nonRootUID, v1.StorageMediumDefault) + doTest0644(ctx, f, nonRootUID, v1.StorageMediumDefault) }) /* @@ -205,7 +205,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0666(f, nonRootUID, v1.StorageMediumDefault) + doTest0666(ctx, f, nonRootUID, v1.StorageMediumDefault) }) /* @@ -215,7 +215,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. 
*/ framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { - doTest0777(f, nonRootUID, v1.StorageMediumDefault) + doTest0777(ctx, f, nonRootUID, v1.StorageMediumDefault) }) /* @@ -283,8 +283,8 @@ var _ = SIGDescribe("EmptyDir volumes", func() { } ginkgo.By("Creating Pod") - e2epod.NewPodClient(f).Create(pod) - e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) ginkgo.By("Reading file content from the nginx-container") result := e2epod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath)) @@ -343,14 +343,14 @@ var _ = SIGDescribe("EmptyDir volumes", func() { var err error ginkgo.By("Creating Pod") - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("Waiting for the pod running") - err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name) ginkgo.By("Getting the pod") - pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod %s", pod.Name) ginkgo.By("Reading empty dir size") @@ -364,7 +364,7 @@ const ( volumeName = "test-volume" ) -func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) { +func doTestSetgidFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) { var ( filePath = path.Join(volumePath, "test-file") source = &v1.EmptyDirVolumeSource{Medium: medium} @@ -391,10 +391,10 @@ func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMed if medium == v1.StorageMediumMemory { out = append(out, "mount type of \"/test-volume\": tmpfs") } - e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) + e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } -func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) { +func doTestSubPathFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) { var ( subPath = "test-sub" source = &v1.EmptyDirVolumeSource{Medium: medium} @@ -424,10 +424,10 @@ func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMe if medium == v1.StorageMediumMemory { out = append(out, "mount type of \"/test-volume\": tmpfs") } - e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) + e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } -func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) { +func doTestVolumeModeFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) { var ( source = &v1.EmptyDirVolumeSource{Medium: medium} pod = testPodWithVolume(uid, volumePath, source) @@ -449,10 +449,10 @@ func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.Storag if medium == v1.StorageMediumMemory { out = append(out, "mount type of \"/test-volume\": tmpfs") } - e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) + e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } -func doTest0644FSGroup(f *framework.Framework, uid 
int64, medium v1.StorageMedium) { +func doTest0644FSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) { var ( filePath = path.Join(volumePath, "test-file") source = &v1.EmptyDirVolumeSource{Medium: medium} @@ -477,10 +477,10 @@ func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMediu if medium == v1.StorageMediumMemory { out = append(out, "mount type of \"/test-volume\": tmpfs") } - e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) + e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } -func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium) { +func doTestVolumeMode(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) { var ( source = &v1.EmptyDirVolumeSource{Medium: medium} pod = testPodWithVolume(uid, volumePath, source) @@ -499,10 +499,10 @@ func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium if medium == v1.StorageMediumMemory { out = append(out, "mount type of \"/test-volume\": tmpfs") } - e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) + e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } -func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) { +func doTest0644(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) { var ( filePath = path.Join(volumePath, "test-file") source = &v1.EmptyDirVolumeSource{Medium: medium} @@ -524,10 +524,10 @@ func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) { if medium == v1.StorageMediumMemory { out = append(out, "mount type of \"/test-volume\": tmpfs") } - e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) + e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } -func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) { +func doTest0666(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) { var ( filePath = path.Join(volumePath, "test-file") source = &v1.EmptyDirVolumeSource{Medium: medium} @@ -549,10 +549,10 @@ func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) { if medium == v1.StorageMediumMemory { out = append(out, "mount type of \"/test-volume\": tmpfs") } - e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) + e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } -func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) { +func doTest0777(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) { var ( filePath = path.Join(volumePath, "test-file") source = &v1.EmptyDirVolumeSource{Medium: medium} @@ -574,7 +574,7 @@ func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) { if medium == v1.StorageMediumMemory { out = append(out, "mount type of \"/test-volume\": tmpfs") } - e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) + e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } func formatMedium(medium v1.StorageMedium) string { diff --git a/test/e2e/common/storage/host_path.go b/test/e2e/common/storage/host_path.go index 0c8de0350fe..e92220f1280 100644 --- a/test/e2e/common/storage/host_path.go +++ b/test/e2e/common/storage/host_path.go @@ -60,7 +60,7 @@ var _ = SIGDescribe("HostPath", func() { fmt.Sprintf("--fs_type=%v", volumePath), fmt.Sprintf("--file_mode=%v", volumePath), } - e2epodoutput.TestContainerOutputRegexp(f, "hostPath mode", pod, 0, []string{ + e2epodoutput.TestContainerOutputRegexp(ctx, f, "hostPath mode", pod, 0, []string{ "mode 
of file \"/test-volume\": dg?trwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir }) }) @@ -89,7 +89,7 @@ var _ = SIGDescribe("HostPath", func() { } //Read the content of the file with the second container to //verify volumes being shared properly among containers within the pod. - e2epodoutput.TestContainerOutput(f, "hostPath r/w", pod, 1, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "hostPath r/w", pod, 1, []string{ "content of file \"/test-volume/test-file\": mount-tester new file", }) }) @@ -126,7 +126,7 @@ var _ = SIGDescribe("HostPath", func() { fmt.Sprintf("--retry_time=%d", retryDuration), } - e2epodoutput.TestContainerOutput(f, "hostPath subPath", pod, 1, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "hostPath subPath", pod, 1, []string{ "content of file \"" + filePathInReader + "\": mount-tester new file", }) }) diff --git a/test/e2e/common/storage/projected_combined.go b/test/e2e/common/storage/projected_combined.go index c4f9522c5bd..31246a9af8c 100644 --- a/test/e2e/common/storage/projected_combined.go +++ b/test/e2e/common/storage/projected_combined.go @@ -66,11 +66,11 @@ var _ = SIGDescribe("Projected combined", func() { } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -89,7 +89,7 @@ var _ = SIGDescribe("Projected combined", func() { }, }, } - e2epodoutput.TestContainerOutput(f, "Check all projections for projected volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "Check all projections for projected volume plugin", pod, 0, []string{ podName, "secret-value-1", "configmap-value-1", diff --git a/test/e2e/common/storage/projected_configmap.go b/test/e2e/common/storage/projected_configmap.go index a255e9a4609..024c2ada66d 100644 --- a/test/e2e/common/storage/projected_configmap.go +++ b/test/e2e/common/storage/projected_configmap.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. 
*/ framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { - doProjectedConfigMapE2EWithoutMappings(f, false, 0, nil) + doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, nil) }) /* @@ -56,14 +56,14 @@ var _ = SIGDescribe("Projected configMap", func() { */ framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0400) - doProjectedConfigMapE2EWithoutMappings(f, false, 0, &defaultMode) + doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode) }) ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ - doProjectedConfigMapE2EWithoutMappings(f, true, 1001, &defaultMode) + doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, &defaultMode) }) /* @@ -72,13 +72,13 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. */ framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) { - doProjectedConfigMapE2EWithoutMappings(f, true, 0, nil) + doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 0, nil) }) ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") - doProjectedConfigMapE2EWithoutMappings(f, true, 1001, nil) + doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil) }) /* @@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--. */ framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { - doProjectedConfigMapE2EWithMappings(f, false, 0, nil) + doProjectedConfigMapE2EWithMappings(ctx, f, false, 0, nil) }) /* @@ -98,7 +98,7 @@ var _ = SIGDescribe("Projected configMap", func() { */ framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { mode := int32(0400) - doProjectedConfigMapE2EWithMappings(f, false, 0, &mode) + doProjectedConfigMapE2EWithMappings(ctx, f, false, 0, &mode) }) /* @@ -107,13 +107,13 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--. 
*/ framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) { - doProjectedConfigMapE2EWithMappings(f, true, 0, nil) + doProjectedConfigMapE2EWithMappings(ctx, f, true, 0, nil) }) ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") - doProjectedConfigMapE2EWithMappings(f, true, 1001, nil) + doProjectedConfigMapE2EWithMappings(ctx, f, true, 1001, nil) }) /* @@ -122,7 +122,7 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap and performs a create and update to new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the confgiMap to value-2. */ framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) name := "projected-configmap-test-upd-" + string(uuid.NewUUID()) @@ -140,7 +140,7 @@ var _ = SIGDescribe("Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -148,22 +148,22 @@ var _ = SIGDescribe("Projected configMap", func() { "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1") ginkgo.By("Creating the pod") - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) pollLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) } - gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) ginkgo.By("waiting to observe update in volume") - gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2")) + gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2")) }) /* @@ -172,7 +172,7 @@ var _ = SIGDescribe("Projected configMap", func() { Description: Create a Pod with 
three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have configMap, update and delete containers MUST be created with a ConfigMap value as 'value-1'. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container. */ framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true volumeMountPath := "/etc/projected-configmap-volumes" @@ -218,12 +218,12 @@ var _ = SIGDescribe("Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error - if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil { + if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, deleteConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) - if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil { + if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, updateConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } @@ -327,44 +327,44 @@ var _ = SIGDescribe("Projected configMap", func() { }, } ginkgo.By("Creating the pod") - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) pollCreateLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) } - gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1")) + gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1")) pollUpdateLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) } - gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3")) + gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3")) pollDeleteLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) } - 
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, deleteConfigMap.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) updateConfigMap.ResourceVersion = "" // to force update delete(updateConfigMap.Data, "data-1") updateConfigMap.Data["data-3"] = "value-3" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, updateConfigMap, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) - if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil { + if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, createConfigMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } ginkgo.By("waiting to observe update in volume") - gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) - gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) - gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1")) + gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) + gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1")) }) /* @@ -384,7 +384,7 @@ var _ = SIGDescribe("Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -451,7 +451,7 @@ var _ = SIGDescribe("Projected configMap", func() { }, } - e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{ "content of file \"/etc/projected-configmap-volume/data-1\": value-1", }) @@ -462,7 +462,7 @@ var _ = SIGDescribe("Projected configMap", func() { //Slow (~5 mins) ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", 
func(ctx context.Context) { volumeMountPath := "/etc/projected-configmap-volumes" - pod, err := createNonOptionalConfigMapPod(f, volumeMountPath) + pod, err := createNonOptionalConfigMapPod(ctx, f, volumeMountPath) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) }) @@ -471,12 +471,12 @@ var _ = SIGDescribe("Projected configMap", func() { //Slow (~5 mins) ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" - pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath) + pod, err := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) }) }) -func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) { +func doProjectedConfigMapE2EWithoutMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) { groupID := int64(fsGroup) var ( @@ -488,7 +488,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -513,10 +513,10 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, "content of file \"/etc/projected-configmap-volume/data-1\": value-1", fileModeRegexp, } - e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output) } -func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) { +func doProjectedConfigMapE2EWithMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) { groupID := int64(fsGroup) var ( @@ -529,7 +529,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -564,7 +564,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs fileModeRegexp := getFileModeRegex("/etc/projected-configmap-volume/path/to/data-2", itemMode) output = append(output, fileModeRegexp) } - e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output) } func createProjectedConfigMapMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod { diff --git a/test/e2e/common/storage/projected_downwardapi.go 
b/test/e2e/common/storage/projected_downwardapi.go index 7de8c266052..3616662ba59 100644 --- a/test/e2e/common/storage/projected_downwardapi.go +++ b/test/e2e/common/storage/projected_downwardapi.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("%s\n", podName), }) }) @@ -71,7 +71,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { defaultMode := int32(0400) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ "mode of file \"/etc/podinfo/podname\": -r--------", }) }) @@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { mode := int32(0400) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ "mode of file \"/etc/podinfo/podname\": -r--------", }) }) @@ -102,7 +102,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { FSGroup: &gid, } setPodNonRootUser(pod) - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("%s\n", podName), }) }) @@ -118,7 +118,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { FSGroup: &gid, } setPodNonRootUser(pod) - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ "mode of file \"/etc/podinfo/podname\": -r--r-----", }) }) @@ -137,20 +137,20 @@ var _ = SIGDescribe("Projected downwardAPI", func() { pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels") containerName := "client-container" ginkgo.By("Creating the pod") - podClient.CreateSync(pod) + podClient.CreateSync(ctx, pod) - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n")) //modify labels - podClient.Update(podName, func(pod *v1.Pod) { + podClient.Update(ctx, podName, func(pod *v1.Pod) { pod.Labels["key3"] = "value3" }) - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n")) }) @@ -168,20 +168,20 @@ var _ = SIGDescribe("Projected downwardAPI", func() { containerName := "client-container" ginkgo.By("Creating the pod") - pod = podClient.CreateSync(pod) + pod = podClient.CreateSync(ctx, pod) - 
gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n")) //modify annotations - podClient.Update(podName, func(pod *v1.Pod) { + podClient.Update(ctx, podName, func(pod *v1.Pod) { pod.Annotations["builder"] = "foo" }) - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n")) }) @@ -195,7 +195,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("2\n"), }) }) @@ -209,7 +209,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("67108864\n"), }) }) @@ -223,7 +223,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("1\n"), }) }) @@ -237,7 +237,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") - e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ + e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{ fmt.Sprintf("33554432\n"), }) }) @@ -251,7 +251,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") - e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) }) /* @@ -263,7 +263,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") - e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) }) }) diff --git a/test/e2e/common/storage/projected_secret.go b/test/e2e/common/storage/projected_secret.go index 
62d6e3a059d..6706ab10ce3 100644 --- a/test/e2e/common/storage/projected_secret.go +++ b/test/e2e/common/storage/projected_secret.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("Projected secret", func() { Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default. */ framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { - doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) + doProjectedSecretE2EWithoutMapping(ctx, f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) }) /* @@ -55,7 +55,7 @@ var _ = SIGDescribe("Projected secret", func() { */ framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0400) - doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) + doProjectedSecretE2EWithoutMapping(ctx, f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) }) /* @@ -67,7 +67,7 @@ var _ = SIGDescribe("Projected secret", func() { framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ fsGroup := int64(1001) - doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) + doProjectedSecretE2EWithoutMapping(ctx, f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) }) /* @@ -76,7 +76,7 @@ var _ = SIGDescribe("Projected secret", func() { Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------on the mapped volume. 
*/ framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { - doProjectedSecretE2EWithMapping(f, nil) + doProjectedSecretE2EWithMapping(ctx, f, nil) }) /* @@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected secret", func() { */ framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { mode := int32(0400) - doProjectedSecretE2EWithMapping(f, &mode) + doProjectedSecretE2EWithMapping(ctx, f, &mode) }) ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) { @@ -97,7 +97,7 @@ var _ = SIGDescribe("Projected secret", func() { secret2Name = "projected-secret-test-" + string(uuid.NewUUID()) ) - if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil { + if namespace2, err = f.CreateNamespace(ctx, "secret-namespace", nil); err != nil { framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err) } @@ -105,10 +105,10 @@ var _ = SIGDescribe("Projected secret", func() { secret2.Data = map[string][]byte{ "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } - if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil { + if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(ctx, secret2, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret2.Name, err) } - doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) + doProjectedSecretE2EWithoutMapping(ctx, f, nil /* default mode */, secret2.Name, nil, nil) }) /* @@ -131,7 +131,7 @@ var _ = SIGDescribe("Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -201,7 +201,7 @@ var _ = SIGDescribe("Projected secret", func() { } fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/data-1", nil) - e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{ + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, []string{ "content of file \"/etc/projected-secret-volume/data-1\": value-1", fileModeRegexp, }) @@ -213,7 +213,7 @@ var _ = SIGDescribe("Projected secret", func() { Description: Create a Pod with three containers with secrets namely a create, update and delete container. Create Container when started MUST no have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container. 
*/ framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true volumeMountPath := "/etc/projected-secret-volumes" @@ -259,12 +259,12 @@ var _ = SIGDescribe("Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error - if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil { + if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, deleteSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) - if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil { + if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, updateSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } @@ -368,44 +368,44 @@ var _ = SIGDescribe("Projected secret", func() { }, } ginkgo.By("Creating the pod") - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) pollCreateLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) } - gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1")) + gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1")) pollUpdateLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) } - gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3")) + gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3")) pollDeleteLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) } - gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, deleteSecret.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) 
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) updateSecret.ResourceVersion = "" // to force update delete(updateSecret.Data, "data-1") updateSecret.Data["data-3"] = []byte("value-3") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, updateSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) - if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil { + if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, createSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) } ginkgo.By("waiting to observe update in volume") - gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) - gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) - gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1")) + gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) + gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1")) }) //The secret is in pending during volume creation until the secret objects are available @@ -414,7 +414,7 @@ var _ = SIGDescribe("Projected secret", func() { ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/projected-secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) - err := createNonOptionalSecretPod(f, volumeMountPath, podName) + err := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName) framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name) }) @@ -424,12 +424,12 @@ var _ = SIGDescribe("Projected secret", func() { ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) - err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName) + err := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName) framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name) }) }) -func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, +func doProjectedSecretE2EWithoutMapping(ctx context.Context, f *framework.Framework, defaultMode *int32, secretName string, fsGroup *int64, uid *int64) { var ( volumeName = "projected-secret-volume" @@ -439,7 +439,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error - if secret, err = 
f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -505,10 +505,10 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int fileModeRegexp, } - e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput) } -func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { +func doProjectedSecretE2EWithMapping(ctx context.Context, f *framework.Framework, mode *int32) { var ( name = "projected-secret-test-map-" + string(uuid.NewUUID()) volumeName = "projected-secret-volume" @@ -518,7 +518,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -582,5 +582,5 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { fileModeRegexp, } - e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput) } diff --git a/test/e2e/common/storage/secrets_volume.go b/test/e2e/common/storage/secrets_volume.go index 1713bdc7b76..b8ea46c5901 100644 --- a/test/e2e/common/storage/secrets_volume.go +++ b/test/e2e/common/storage/secrets_volume.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default. 
*/ framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { - doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil) + doSecretE2EWithoutMapping(ctx, f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil) }) /* @@ -56,7 +56,7 @@ var _ = SIGDescribe("Secrets", func() { */ framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0400) - doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil) + doSecretE2EWithoutMapping(ctx, f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil) }) /* @@ -68,7 +68,7 @@ var _ = SIGDescribe("Secrets", func() { framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ fsGroup := int64(1001) - doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) + doSecretE2EWithoutMapping(ctx, f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) }) /* @@ -77,7 +77,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default. */ framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { - doSecretE2EWithMapping(f, nil) + doSecretE2EWithMapping(ctx, f, nil) }) /* @@ -88,7 +88,7 @@ var _ = SIGDescribe("Secrets", func() { */ framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { mode := int32(0400) - doSecretE2EWithMapping(f, &mode) + doSecretE2EWithMapping(ctx, f, &mode) }) /* @@ -103,7 +103,7 @@ var _ = SIGDescribe("Secrets", func() { secret2Name = "secret-test-" + string(uuid.NewUUID()) ) - if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil { + if namespace2, err = f.CreateNamespace(ctx, "secret-namespace", nil); err != nil { framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err) } @@ -111,10 +111,10 @@ var _ = SIGDescribe("Secrets", func() { secret2.Data = map[string][]byte{ "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } - if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil { + if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(ctx, secret2, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret2.Name, err) } - doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) + doSecretE2EWithoutMapping(ctx, f, nil /* default mode */, secret2.Name, nil, nil) }) /* @@ -137,7 +137,7 @@ var _ = SIGDescribe("Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + 
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -191,7 +191,7 @@ var _ = SIGDescribe("Secrets", func() { } fileModeRegexp := getFileModeRegex("/etc/secret-volume/data-1", nil) - e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{ + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, []string{ "content of file \"/etc/secret-volume/data-1\": value-1", fileModeRegexp, }) @@ -203,7 +203,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a Pod with three containers with secrets volume sources namely a create, update and delete container. Create Container when started MUST not have secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container. */ framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true volumeMountPath := "/etc/secret-volumes" @@ -249,12 +249,12 @@ var _ = SIGDescribe("Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error - if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil { + if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, deleteSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) - if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil { + if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, updateSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } @@ -334,44 +334,44 @@ var _ = SIGDescribe("Secrets", func() { }, } ginkgo.By("Creating the pod") - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) pollCreateLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) } - gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/create/data-1")) + gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/create/data-1")) pollUpdateLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) } - gomega.Eventually(pollUpdateLogs, podLogTimeout, 
framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/update/data-3")) + gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/update/data-3")) pollDeleteLogs := func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) } - gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, deleteSecret.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) updateSecret.ResourceVersion = "" // to force update delete(updateSecret.Data, "data-1") updateSecret.Data["data-3"] = []byte("value-3") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, updateSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) - if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil { + if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, createSecret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) } ginkgo.By("waiting to observe update in volume") - gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) - gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) - gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1")) + gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) + gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) + gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1")) }) /* @@ -387,28 +387,28 @@ var _ = SIGDescribe("Secrets", func() { name := "immutable" secret := secretForTest(f.Namespace.Name, name) - currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create secret %q in namespace %q", secret.Name, secret.Namespace) currentSecret.Data["data-4"] = []byte("value-4\n") - currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), 
currentSecret, metav1.UpdateOptions{}) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) // Mark secret as immutable. trueVal := true currentSecret.Immutable = &trueVal - currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to mark secret %q in namespace %q as immutable", secret.Name, secret.Namespace) // Ensure data can't be changed now. currentSecret.Data["data-5"] = []byte("value-5\n") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{}) if !apierrors.IsInvalid(err) { framework.Failf("expected 'invalid' as error, got instead: %v", err) } // Ensure secret can't be switched from immutable to mutable. - currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace) if !*currentSecret.Immutable { framework.Failf("currentSecret %s can be switched from immutable to mutable", currentSecret.Name) @@ -416,20 +416,20 @@ var _ = SIGDescribe("Secrets", func() { falseVal := false currentSecret.Immutable = &falseVal - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{}) if !apierrors.IsInvalid(err) { framework.Failf("expected 'invalid' as error, got instead: %v", err) } // Ensure that metadata can be changed. - currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace) currentSecret.Labels = map[string]string{"label1": "value1"} - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) // Ensure that immutable secret can be deleted. 
- err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", secret.Name, secret.Namespace) }) @@ -439,7 +439,7 @@ var _ = SIGDescribe("Secrets", func() { ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) - err := createNonOptionalSecretPod(f, volumeMountPath, podName) + err := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName) framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name) }) @@ -449,7 +449,7 @@ var _ = SIGDescribe("Secrets", func() { ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) - err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName) + err := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName) framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name) }) }) @@ -468,7 +468,7 @@ func secretForTest(namespace, name string) *v1.Secret { } } -func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string, +func doSecretE2EWithoutMapping(ctx context.Context, f *framework.Framework, defaultMode *int32, secretName string, fsGroup *int64, uid *int64) { var ( volumeName = "secret-volume" @@ -478,7 +478,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -535,10 +535,10 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre fileModeRegexp, } - e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput) + e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput) } -func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { +func doSecretE2EWithMapping(ctx context.Context, f *framework.Framework, mode *int32) { var ( name = "secret-test-map-" + string(uuid.NewUUID()) volumeName = "secret-volume" @@ -548,7 +548,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -603,11 +603,11 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { fileModeRegexp, } - e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput) + 
e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput) } -func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName string) error { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) +func createNonOptionalSecretPod(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) error { + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) falseValue := false @@ -650,12 +650,12 @@ func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName }, } ginkgo.By("Creating the pod") - pod = e2epod.NewPodClient(f).Create(pod) - return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + pod = e2epod.NewPodClient(f).Create(ctx, pod) + return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) } -func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPath, podName string) error { - podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) +func createNonOptionalSecretPodWithSecret(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) error { + podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) falseValue := false @@ -667,7 +667,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } // creating a pod with secret object, with the key which is not present in secret object. 
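// Editor's note: the secrets.go hunks above all apply one pattern from this patch: a test
// helper gains a leading "ctx context.Context" parameter and every context.TODO() inside it
// becomes that ctx, so client-go calls are cancelled as soon as Ginkgo aborts the spec.
// The sketch below illustrates that pattern with a hypothetical helper name; it is not code
// from the patch itself.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// createTestSecret previously took only the framework; after the conversion it accepts the
// Ginkgo-provided context first and forwards it to the API call instead of context.TODO().
func createTestSecret(ctx context.Context, f *framework.Framework, secret *v1.Secret) *v1.Secret {
	created, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
	framework.ExpectNoError(err, "create test secret %q", secret.Name)
	return created
}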
@@ -711,6 +711,6 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat }, } ginkgo.By("Creating the pod") - pod = e2epod.NewPodClient(f).Create(pod) - return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + pod = e2epod.NewPodClient(f).Create(ctx, pod) + return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) } diff --git a/test/e2e/common/storage/volumes.go b/test/e2e/common/storage/volumes.go index cf3bd52ba5c..65e0b8b1a16 100644 --- a/test/e2e/common/storage/volumes.go +++ b/test/e2e/common/storage/volumes.go @@ -77,7 +77,7 @@ var _ = SIGDescribe("Volumes", func() { //////////////////////////////////////////////////////////////////////// ginkgo.Describe("NFSv4", func() { ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) { - config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{}) + config, _, serverHost := e2evolume.NewNFSServer(ctx, c, namespace.Name, []string{}) ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config) tests := []e2evolume.Test{ @@ -95,13 +95,13 @@ var _ = SIGDescribe("Volumes", func() { } // Must match content of test/images/volumes-tester/nfs/index.html - e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests) }) }) ginkgo.Describe("NFSv3", func() { ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) { - config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{}) + config, _, serverHost := e2evolume.NewNFSServer(ctx, c, namespace.Name, []string{}) ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config) tests := []e2evolume.Test{ @@ -118,7 +118,7 @@ var _ = SIGDescribe("Volumes", func() { }, } // Must match content of test/images/volume-tester/nfs/index.html - e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests) }) }) }) diff --git a/test/e2e/dra/deploy.go b/test/e2e/dra/deploy.go index 9d4b1f9a606..ffb80764404 100644 --- a/test/e2e/dra/deploy.go +++ b/test/e2e/dra/deploy.go @@ -59,7 +59,7 @@ type Nodes struct { // NewNodes selects nodes to run the test on. func NewNodes(f *framework.Framework, minNodes, maxNodes int) *Nodes { nodes := &Nodes{} - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("selecting nodes") // The kubelet plugin is harder. We deploy the builtin manifest // after patching in the driver name and all nodes on which we @@ -67,7 +67,7 @@ func NewNodes(f *framework.Framework, minNodes, maxNodes int) *Nodes { // // Only a subset of the nodes are picked to avoid causing // unnecessary load on a big cluster. 
- nodeList, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes) + nodeList, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, maxNodes) framework.ExpectNoError(err, "get nodes") numNodes := int32(len(nodeList.Items)) if int(numNodes) < minNodes { @@ -160,7 +160,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) { rsName := "" draAddr := path.Join(framework.TestContext.KubeletRootDir, "plugins", d.Name+".sock") numNodes := int32(len(nodes.NodeNames)) - err := utils.CreateFromManifests(d.f, d.f.Namespace, func(item interface{}) error { + err := utils.CreateFromManifests(ctx, d.f, d.f.Namespace, func(item interface{}) error { switch item := item.(type) { case *appsv1.ReplicaSet: item.Name += d.NameSuffix @@ -197,7 +197,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) { framework.ExpectNoError(err, "get replicaset") // Wait for all pods to be running. - if err := e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(d.f.ClientSet, rs, numNodes); err != nil { + if err := e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, d.f.ClientSet, rs, numNodes); err != nil { framework.ExpectNoError(err, "all kubelet plugin proxies running") } requirement, err := labels.NewRequirement(instanceKey, selection.Equals, []string{d.Name}) diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go index cabebc24e9a..fc7529e682c 100644 --- a/test/e2e/dra/dra.go +++ b/test/e2e/dra/dra.go @@ -61,7 +61,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu nodes := NewNodes(f, 1, 1) driver := NewDriver(f, nodes, networkResources) // All tests get their own driver instance. b := newBuilder(f, driver) - ginkgo.It("registers plugin", func(ctx context.Context) { + ginkgo.It("registers plugin", func() { ginkgo.By("the driver is running") }) @@ -79,7 +79,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.create(ctx, parameters, pod, template) ginkgo.By("wait for NodePrepareResource call") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func(ctx context.Context) error { if driver.CallCount(m) == 0 { return errors.New("NodePrepareResource not called yet") } @@ -89,7 +89,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu ginkgo.By("allowing container startup to succeed") callCount := driver.CallCount(m) driver.Fail(m, false) - err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace) framework.ExpectNoError(err, "start pod with inline resource claim") if driver.CallCount(m) == callCount { framework.Fail("NodePrepareResource should have been called again") @@ -127,7 +127,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.create(ctx, parameters, claim, pod) - b.testPod(f.ClientSet, pod) + b.testPod(ctx, f.ClientSet, pod) ginkgo.By(fmt.Sprintf("force delete test pod %s", pod.Name)) err := b.f.ClientSet.CoreV1().Pods(b.f.Namespace.Name).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &zero}) @@ -157,7 +157,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.create(ctx, classParameters, claimParameters, pod, template) - b.testPod(f.ClientSet, pod, "user_a", "b", "admin_x", "y") + b.testPod(ctx, f.ClientSet, pod, "user_a", "b", "admin_x", "y") }) }) @@ -174,7 +174,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", 
fu pod, template := b.podInline(allocationMode) b.create(ctx, parameters, pod, template) - b.testPod(f.ClientSet, pod) + b.testPod(ctx, f.ClientSet, pod) }) ginkgo.It("supports inline claim referenced by multiple containers", func(ctx context.Context) { @@ -182,7 +182,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu pod, template := b.podInlineMultiple(allocationMode) b.create(ctx, parameters, pod, template) - b.testPod(f.ClientSet, pod) + b.testPod(ctx, f.ClientSet, pod) }) ginkgo.It("supports simple pod referencing external resource claim", func(ctx context.Context) { @@ -190,7 +190,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu pod := b.podExternal() b.create(ctx, parameters, b.externalClaim(allocationMode), pod) - b.testPod(f.ClientSet, pod) + b.testPod(ctx, f.ClientSet, pod) }) ginkgo.It("supports external claim referenced by multiple pods", func(ctx context.Context) { @@ -202,7 +202,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.create(ctx, parameters, claim, pod1, pod2, pod3) for _, pod := range []*v1.Pod{pod1, pod2, pod3} { - b.testPod(f.ClientSet, pod) + b.testPod(ctx, f.ClientSet, pod) } }) @@ -215,7 +215,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.create(ctx, parameters, claim, pod1, pod2, pod3) for _, pod := range []*v1.Pod{pod1, pod2, pod3} { - b.testPod(f.ClientSet, pod) + b.testPod(ctx, f.ClientSet, pod) } }) @@ -228,7 +228,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu pod.Spec.InitContainers[0].Command = []string{"sh", "-c", "env | grep user_a=b"} b.create(ctx, parameters, pod, template) - b.testPod(f.ClientSet, pod) + b.testPod(ctx, f.ClientSet, pod) }) } @@ -277,7 +277,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.create(ctx, parameters, claim, pod1, pod2) for _, pod := range []*v1.Pod{pod1, pod2} { - err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) + err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "start pod") } }) @@ -307,7 +307,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.create(ctx, objs...) for _, pod := range pods { - err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) + err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "start pod") } @@ -369,7 +369,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu // To ensure the right timing, allocation of the second // claim gets delayed while creating another pod // that gets the remaining resource on the node. 
- ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() parameters := b.parameters() @@ -408,7 +408,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu ginkgo.By("waiting for one claim to be allocated") var nodeSelector *v1.NodeSelector - gomega.Eventually(func() (int, error) { + gomega.Eventually(ctx, func(ctx context.Context) (int, error) { claims, err := f.ClientSet.ResourceV1alpha1().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return 0, err @@ -434,14 +434,14 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu node := req.Values[0] pod2.Spec.NodeSelector = map[string]string{req.Key: node} b.create(ctx, pod2, template2) - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod2), "start pod 2") + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod2), "start pod 2") // Allow allocation of claim2 to proceed. It should fail now // and the other node must be used instead, after deallocating // the first claim. ginkgo.By("move first pod to other node") cancelBlockClaim() - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod1), "start pod 1") + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod1), "start pod 1") pod1, err := f.ClientSet.CoreV1().Pods(pod1.Namespace).Get(ctx, pod1.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get first pod") if pod1.Spec.NodeName == "" { @@ -488,7 +488,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu }, ) b1.create(ctx, parameters1, parameters2, claim1, claim2, pod) - b1.testPod(f.ClientSet, pod) + b1.testPod(ctx, f.ClientSet, pod) }) }) }) @@ -725,12 +725,12 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) { } // testPod runs pod and checks if container logs contain expected environment variables -func (b *builder) testPod(clientSet kubernetes.Interface, pod *v1.Pod, env ...string) { - err := e2epod.WaitForPodRunningInNamespace(clientSet, pod) +func (b *builder) testPod(ctx context.Context, clientSet kubernetes.Interface, pod *v1.Pod, env ...string) { + err := e2epod.WaitForPodRunningInNamespace(ctx, clientSet, pod) framework.ExpectNoError(err, "start pod") for _, container := range pod.Spec.Containers { - log, err := e2epod.GetPodLogs(clientSet, pod.Namespace, pod.Name, container.Name) + log, err := e2epod.GetPodLogs(ctx, clientSet, pod.Namespace, pod.Name, container.Name) framework.ExpectNoError(err, "get logs") if len(env) == 0 { for key, value := range b.parametersEnv() { @@ -762,9 +762,7 @@ func (b *builder) setUp() { ginkgo.DeferCleanup(b.tearDown) } -func (b *builder) tearDown() { - ctx := context.Background() - +func (b *builder) tearDown(ctx context.Context) { err := b.f.ClientSet.ResourceV1alpha1().ResourceClasses().Delete(ctx, b.className(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "delete resource class") diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 1d01d11126e..808284553a1 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -74,21 +74,21 @@ const ( var progressReporter = &e2ereporters.ProgressReporter{} -var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { +var _ = ginkgo.SynchronizedBeforeSuite(func(ctx context.Context) []byte { // Reference common test to make the import valid. 
commontest.CurrentSuite = commontest.E2E progressReporter.SetStartMsg() - setupSuite() + setupSuite(ctx) return nil -}, func(data []byte) { +}, func(ctx context.Context, data []byte) { // Run on all Ginkgo nodes - setupSuitePerGinkgoNode() + setupSuitePerGinkgoNode(ctx) }) var _ = ginkgo.SynchronizedAfterSuite(func() { progressReporter.SetEndMsg() -}, func() { - AfterSuiteActions() +}, func(ctx context.Context) { + AfterSuiteActions(ctx) }) // RunE2ETests checks configuration parameters (specified through flags) and then runs @@ -132,9 +132,9 @@ func RunE2ETests(t *testing.T) { // This unequivocally identifies the default IP family because services are single family // TODO: dual-stack may support multiple families per service // but we can detect if a cluster is dual stack because pods have two addresses (one per family) -func getDefaultClusterIPFamily(c clientset.Interface) string { +func getDefaultClusterIPFamily(ctx context.Context, c clientset.Interface) string { // Get the ClusterIP of the kubernetes service created in the default namespace - svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get kubernetes service ClusterIP: %v", err) } @@ -150,7 +150,7 @@ func getDefaultClusterIPFamily(c clientset.Interface) string { // daemonset are ready). // // If allowedNotReadyNodes is -1, this method returns immediately without waiting. -func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error { +func waitForDaemonSets(ctx context.Context, c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error { if allowedNotReadyNodes == -1 { return nil } @@ -159,8 +159,8 @@ func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes in framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns) - return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - dsList, err := c.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{}) + return wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + dsList, err := c.AppsV1().DaemonSets(ns).List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) return false, err @@ -191,7 +191,7 @@ func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes in // This function takes two parameters: one function which runs on only the first Ginkgo node, // returning an opaque byte array, and then a second function which runs on all Ginkgo nodes, // accepting the byte array. -func setupSuite() { +func setupSuite(ctx context.Context) { // Run only on Ginkgo node 1 switch framework.TestContext.Provider { @@ -207,7 +207,7 @@ func setupSuite() { // Delete any namespaces except those created by the system. This ensures no // lingering resources are left over from a previous test run. 
if framework.TestContext.CleanStart { - deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */ + deleted, err := framework.DeleteNamespaces(ctx, c, nil, /* deleteFilter */ []string{ metav1.NamespaceSystem, metav1.NamespaceDefault, @@ -217,7 +217,7 @@ func setupSuite() { if err != nil { framework.Failf("Error deleting orphaned namespaces: %v", err) } - if err := framework.WaitForNamespacesDeleted(c, deleted, namespaceCleanupTimeout); err != nil { + if err := framework.WaitForNamespacesDeleted(ctx, c, deleted, namespaceCleanupTimeout); err != nil { framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) } } @@ -225,11 +225,11 @@ func setupSuite() { // In large clusters we may get to this point but still have a bunch // of nodes without Routes created. Since this would make a node // unschedulable, we need to wait until all of them are schedulable. - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes { - nodes, err := e2enode.GetReadySchedulableNodes(c) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) framework.TestContext.CloudConfig.NumNodes = len(nodes.Items) } @@ -243,19 +243,19 @@ func setupSuite() { // #41007. To avoid those pods preventing the whole test runs (and just // wasting the whole run), we allow for some not-ready pods (with the // number equal to the number of allowed not-ready nodes). - if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { - e2edebug.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) - e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf) + if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { + e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem) + e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf) framework.Failf("Error waiting for all pods to be running and ready: %v", err) } - if err := waitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil { + if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil { framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err) } if framework.TestContext.PrepullImages { framework.Logf("Pre-pulling images so that they are cached for the tests.") - prepullImages(c) + prepullImages(ctx, c) } // Log the version of the server and this client. 
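// Editor's note: the e2e.go hunks above replace wait.PollImmediate with
// wait.PollImmediateWithContext so that polling stops promptly when the suite context is
// cancelled instead of running out the timeout. A minimal sketch of that conversion, using a
// hypothetical wait helper and DaemonSet name (not code from the patch):
package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForDaemonSetScheduled polls until the named DaemonSet has scheduled at least one pod.
// The condition function receives ctx and uses it for the API call, so both the poll loop and
// the in-flight request end as soon as the test is aborted.
func waitForDaemonSetScheduled(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return ds.Status.CurrentNumberScheduled > 0, nil
	})
}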
@@ -273,7 +273,7 @@ func setupSuite() { if framework.TestContext.NodeKiller.Enabled { nodeKiller := e2enode.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider) - go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh) + go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCtx) } } @@ -387,7 +387,7 @@ func lookupClusterImageSources() (string, string, error) { // such as making some global variables accessible to all parallel executions // Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite // Ref: https://onsi.github.io/ginkgo/#parallel-specs -func setupSuitePerGinkgoNode() { +func setupSuitePerGinkgoNode(ctx context.Context) { // Obtain the default IP family of the cluster // Some e2e test are designed to work on IPv4 only, this global variable // allows to adapt those tests to work on both IPv4 and IPv6 @@ -398,12 +398,12 @@ func setupSuitePerGinkgoNode() { if err != nil { klog.Fatal("Error loading client: ", err) } - framework.TestContext.IPFamily = getDefaultClusterIPFamily(c) + framework.TestContext.IPFamily = getDefaultClusterIPFamily(ctx, c) framework.Logf("Cluster IP family: %s", framework.TestContext.IPFamily) } -func prepullImages(c clientset.Interface) { - namespace, err := framework.CreateTestingNS("img-puller", c, map[string]string{ +func prepullImages(ctx context.Context, c clientset.Interface) { + namespace, err := framework.CreateTestingNS(ctx, "img-puller", c, map[string]string{ "e2e-framework": "img-puller", }) framework.ExpectNoError(err) @@ -421,7 +421,7 @@ func prepullImages(c clientset.Interface) { dsName := fmt.Sprintf("img-pull-%s", strings.ReplaceAll(strings.ReplaceAll(img, "/", "-"), ":", "-")) dsSpec := daemonset.NewDaemonSet(dsName, img, label, nil, nil, nil) - ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), dsSpec, metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, dsSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) imgPullers = append(imgPullers, ds) } @@ -432,11 +432,11 @@ func prepullImages(c clientset.Interface) { dsRetryTimeout := 5 * time.Minute for _, imgPuller := range imgPullers { - checkDaemonset := func() (bool, error) { - return daemonset.CheckPresentOnNodes(c, imgPuller, ns, framework.TestContext.CloudConfig.NumNodes) + checkDaemonset := func(ctx context.Context) (bool, error) { + return daemonset.CheckPresentOnNodes(ctx, c, imgPuller, ns, framework.TestContext.CloudConfig.NumNodes) } framework.Logf("Waiting for %s", imgPuller.Name) - err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonset) + err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonset) framework.ExpectNoError(err, "error waiting for image to be pulled") } } diff --git a/test/e2e/framework/auth/helpers.go b/test/e2e/framework/auth/helpers.go index 1b92ed07662..dd381be3c1a 100644 --- a/test/e2e/framework/auth/helpers.go +++ b/test/e2e/framework/auth/helpers.go @@ -45,13 +45,13 @@ type bindingsGetter interface { // WaitForAuthorizationUpdate checks if the given user can perform the named verb and action. 
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned -func WaitForAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error { - return WaitForNamedAuthorizationUpdate(c, user, namespace, verb, "", resource, allowed) +func WaitForAuthorizationUpdate(ctx context.Context, c v1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error { + return WaitForNamedAuthorizationUpdate(ctx, c, user, namespace, verb, "", resource, allowed) } // WaitForNamedAuthorizationUpdate checks if the given user can perform the named verb and action on the named resource. // If policyCachePollTimeout is reached without the expected condition matching, an error is returned -func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGetter, user, namespace, verb, resourceName string, resource schema.GroupResource, allowed bool) error { +func WaitForNamedAuthorizationUpdate(ctx context.Context, c v1authorization.SubjectAccessReviewsGetter, user, namespace, verb, resourceName string, resource schema.GroupResource, allowed bool) error { review := &authorizationv1.SubjectAccessReview{ Spec: authorizationv1.SubjectAccessReviewSpec{ ResourceAttributes: &authorizationv1.ResourceAttributes{ @@ -65,8 +65,8 @@ func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGette }, } - err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) { - response, err := c.SubjectAccessReviews().Create(context.TODO(), review, metav1.CreateOptions{}) + err := wait.PollWithContext(ctx, policyCachePollInterval, policyCachePollTimeout, func(ctx context.Context) (bool, error) { + response, err := c.SubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{}) if err != nil { return false, err } @@ -80,13 +80,13 @@ func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGette // BindClusterRole binds the cluster role at the cluster scope. If RBAC is not enabled, nil // is returned with no action. -func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1.Subject) error { - if !IsRBACEnabled(c) { +func BindClusterRole(ctx context.Context, c bindingsGetter, clusterRole, ns string, subjects ...rbacv1.Subject) error { + if !IsRBACEnabled(ctx, c) { return nil } // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches - _, err := c.ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ + _, err := c.ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: ns + "--" + clusterRole, }, @@ -107,23 +107,23 @@ func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv // BindClusterRoleInNamespace binds the cluster role at the namespace scope. If RBAC is not enabled, nil // is returned with no action. -func BindClusterRoleInNamespace(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1.Subject) error { - return bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...) +func BindClusterRoleInNamespace(ctx context.Context, c bindingsGetter, clusterRole, ns string, subjects ...rbacv1.Subject) error { + return bindInNamespace(ctx, c, "ClusterRole", clusterRole, ns, subjects...) } // BindRoleInNamespace binds the role at the namespace scope. If RBAC is not enabled, nil // is returned with no action. 
-func BindRoleInNamespace(c bindingsGetter, role, ns string, subjects ...rbacv1.Subject) error { - return bindInNamespace(c, "Role", role, ns, subjects...) +func BindRoleInNamespace(ctx context.Context, c bindingsGetter, role, ns string, subjects ...rbacv1.Subject) error { + return bindInNamespace(ctx, c, "Role", role, ns, subjects...) } -func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rbacv1.Subject) error { - if !IsRBACEnabled(c) { +func bindInNamespace(ctx context.Context, c bindingsGetter, roleType, role, ns string, subjects ...rbacv1.Subject) error { + if !IsRBACEnabled(ctx, c) { return nil } // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches - _, err := c.RoleBindings(ns).Create(context.TODO(), &rbacv1.RoleBinding{ + _, err := c.RoleBindings(ns).Create(ctx, &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: ns + "--" + role, }, @@ -148,9 +148,9 @@ var ( ) // IsRBACEnabled returns true if RBAC is enabled. Otherwise false. -func IsRBACEnabled(crGetter v1rbac.ClusterRolesGetter) bool { +func IsRBACEnabled(ctx context.Context, crGetter v1rbac.ClusterRolesGetter) bool { isRBACEnabledOnce.Do(func() { - crs, err := crGetter.ClusterRoles().List(context.TODO(), metav1.ListOptions{}) + crs, err := crGetter.ClusterRoles().List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err) isRBACEnabled = false diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index c4ad250bb61..f618ba64ca2 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -139,8 +139,8 @@ type ResourceConsumer struct { } // NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer -func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, enableSidecar SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer { - return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, +func NewDynamicResourceConsumer(ctx context.Context, name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, enableSidecar SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer { + return newResourceConsumer(ctx, name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil, enableSidecar, sidecarType) } @@ -178,7 +178,7 @@ initMemoryTotal argument is in megabytes memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod */ -func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, +func newResourceConsumer(ctx context.Context, 
name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string, sidecarStatus SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer { if podAnnotations == nil { podAnnotations = make(map[string]string) @@ -202,11 +202,11 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl framework.ExpectNoError(err) resourceClient := dynamicClient.Resource(schema.GroupVersionResource{Group: crdGroup, Version: crdVersion, Resource: crdNamePlural}).Namespace(nsName) - runServiceAndWorkloadForResourceConsumer(clientset, resourceClient, apiExtensionClient, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations, additionalContainers) + runServiceAndWorkloadForResourceConsumer(ctx, clientset, resourceClient, apiExtensionClient, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations, additionalContainers) controllerName := name + "-ctrl" // If sidecar is enabled and busy, run service and consumer for sidecar if sidecarStatus == Enable && sidecarType == Busy { - runServiceAndSidecarForResourceConsumer(clientset, nsName, name, kind, replicas, serviceAnnotations) + runServiceAndSidecarForResourceConsumer(ctx, clientset, nsName, name, kind, replicas, serviceAnnotations) controllerName = name + "-sidecar-ctrl" } @@ -235,11 +235,11 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl sidecarStatus: sidecarStatus, } - go rc.makeConsumeCPURequests() + go rc.makeConsumeCPURequests(ctx) rc.ConsumeCPU(initCPUTotal) - go rc.makeConsumeMemRequests() + go rc.makeConsumeMemRequests(ctx) rc.ConsumeMem(initMemoryTotal) - go rc.makeConsumeCustomMetric() + go rc.makeConsumeCustomMetric(ctx) rc.ConsumeCustomMetric(initCustomMetric) return rc } @@ -262,7 +262,7 @@ func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) { rc.customMetric <- amount } -func (rc *ResourceConsumer) makeConsumeCPURequests() { +func (rc *ResourceConsumer) makeConsumeCPURequests(ctx context.Context) { defer ginkgo.GinkgoRecover() rc.stopWaitGroup.Add(1) defer rc.stopWaitGroup.Done() @@ -279,9 +279,12 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() { case <-tick: if millicores != 0 { framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores) - rc.sendConsumeCPURequest(millicores) + rc.sendConsumeCPURequest(ctx, millicores) } tick = time.After(rc.sleepTime) + case <-ctx.Done(): + framework.Logf("RC %s: stopping CPU consumer: %v", rc.name, ctx.Err()) + return case <-rc.stopCPU: framework.Logf("RC %s: stopping CPU consumer", rc.name) return @@ -289,7 +292,7 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() { } } -func (rc *ResourceConsumer) makeConsumeMemRequests() { +func (rc *ResourceConsumer) makeConsumeMemRequests(ctx context.Context) { defer ginkgo.GinkgoRecover() rc.stopWaitGroup.Add(1) defer rc.stopWaitGroup.Done() @@ -306,9 +309,12 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() { case <-tick: if megabytes != 0 { framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes) - rc.sendConsumeMemRequest(megabytes) + rc.sendConsumeMemRequest(ctx, megabytes) } tick = time.After(rc.sleepTime) + case <-ctx.Done(): + framework.Logf("RC %s: stopping mem 
consumer: %v", rc.name, ctx.Err()) + return case <-rc.stopMem: framework.Logf("RC %s: stopping mem consumer", rc.name) return @@ -316,7 +322,7 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() { } } -func (rc *ResourceConsumer) makeConsumeCustomMetric() { +func (rc *ResourceConsumer) makeConsumeCustomMetric(ctx context.Context) { defer ginkgo.GinkgoRecover() rc.stopWaitGroup.Add(1) defer rc.stopWaitGroup.Done() @@ -333,9 +339,12 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() { case <-tick: if delta != 0 { framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName) - rc.sendConsumeCustomMetric(delta) + rc.sendConsumeCustomMetric(ctx, delta) } tick = time.After(rc.sleepTime) + case <-ctx.Done(): + framework.Logf("RC %s: stopping metric consumer: %v", rc.name, ctx.Err()) + return case <-rc.stopCustomMetric: framework.Logf("RC %s: stopping metric consumer", rc.name) return @@ -343,11 +352,11 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() { } } -func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) +func (rc *ResourceConsumer) sendConsumeCPURequest(ctx context.Context, millicores int) { + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() - err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, serviceInitializationInterval, serviceInitializationTimeout, func(ctx context.Context) (bool, error) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). @@ -369,11 +378,11 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { } // sendConsumeMemRequest sends POST request for memory consumption -func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) +func (rc *ResourceConsumer) sendConsumeMemRequest(ctx context.Context, megabytes int) { + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() - err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, serviceInitializationInterval, serviceInitializationTimeout, func(ctx context.Context) (bool, error) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). 
@@ -395,11 +404,11 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { } // sendConsumeCustomMetric sends POST request for custom metric consumption -func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) +func (rc *ResourceConsumer) sendConsumeCustomMetric(ctx context.Context, delta int) { + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() - err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, serviceInitializationInterval, serviceInitializationTimeout, func(ctx context.Context) (bool, error) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). @@ -421,44 +430,44 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { } // GetReplicas get the replicas -func (rc *ResourceConsumer) GetReplicas() int { +func (rc *ResourceConsumer) GetReplicas(ctx context.Context) int { switch rc.kind { case KindRC: - replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{}) + replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(ctx, rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if replicationController == nil { framework.Failf(rcIsNil) } return int(replicationController.Status.ReadyReplicas) case KindDeployment: - deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{}) + deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(ctx, rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if deployment == nil { framework.Failf(deploymentIsNil) } return int(deployment.Status.ReadyReplicas) case KindReplicaSet: - rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{}) + rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(ctx, rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if rs == nil { framework.Failf(rsIsNil) } return int(rs.Status.ReadyReplicas) case KindCRD: - deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{}) + deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(ctx, rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if deployment == nil { framework.Failf(deploymentIsNil) } deploymentReplicas := int64(deployment.Status.ReadyReplicas) - scale, err := rc.scaleClient.Scales(rc.nsName).Get(context.TODO(), schema.GroupResource{Group: crdGroup, Resource: crdNamePlural}, rc.name, metav1.GetOptions{}) + scale, err := rc.scaleClient.Scales(rc.nsName).Get(ctx, schema.GroupResource{Group: crdGroup, Resource: crdNamePlural}, rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) - crdInstance, err := rc.resourceClient.Get(context.TODO(), rc.name, metav1.GetOptions{}) + crdInstance, err := rc.resourceClient.Get(ctx, rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) // Update custom resource's status.replicas with child Deployment's current number of ready replicas. 
framework.ExpectNoError(unstructured.SetNestedField(crdInstance.Object, deploymentReplicas, "status", "replicas")) - _, err = rc.resourceClient.Update(context.TODO(), crdInstance, metav1.UpdateOptions{}) + _, err = rc.resourceClient.Update(ctx, crdInstance, metav1.UpdateOptions{}) framework.ExpectNoError(err) return int(scale.Spec.Replicas) default: @@ -468,15 +477,15 @@ func (rc *ResourceConsumer) GetReplicas() int { } // GetHpa get the corresponding horizontalPodAutoscaler object -func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) { - return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(context.TODO(), name, metav1.GetOptions{}) +func (rc *ResourceConsumer) GetHpa(ctx context.Context, name string) (*autoscalingv1.HorizontalPodAutoscaler, error) { + return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(ctx, name, metav1.GetOptions{}) } // WaitForReplicas wait for the desired replicas -func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) { +func (rc *ResourceConsumer) WaitForReplicas(ctx context.Context, desiredReplicas int, duration time.Duration) { interval := 20 * time.Second - err := wait.PollImmediate(interval, duration, func() (bool, error) { - replicas := rc.GetReplicas() + err := wait.PollImmediateWithContext(ctx, interval, duration, func(ctx context.Context) (bool, error) { + replicas := rc.GetReplicas(ctx) framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas) return replicas == desiredReplicas, nil // Expected number of replicas found. Exit. }) @@ -484,12 +493,12 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D } // EnsureDesiredReplicasInRange ensure the replicas is in a desired range -func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) { +func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) { interval := 10 * time.Second - err := wait.PollImmediate(interval, duration, func() (bool, error) { - replicas := rc.GetReplicas() + err := wait.PollImmediateWithContext(ctx, interval, duration, func(ctx context.Context) (bool, error) { + replicas := rc.GetReplicas(ctx) framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas) - as, err := rc.GetHpa(hpaName) + as, err := rc.GetHpa(ctx, hpaName) if err != nil { framework.Logf("Error getting HPA: %s", err) } else { @@ -521,15 +530,15 @@ func (rc *ResourceConsumer) Pause() { } // Resume starts background goroutines responsible for consuming resources. -func (rc *ResourceConsumer) Resume() { +func (rc *ResourceConsumer) Resume(ctx context.Context) { ginkgo.By(fmt.Sprintf("HPA resuming RC %s", rc.name)) - go rc.makeConsumeCPURequests() - go rc.makeConsumeMemRequests() - go rc.makeConsumeCustomMetric() + go rc.makeConsumeCPURequests(ctx) + go rc.makeConsumeMemRequests(ctx) + go rc.makeConsumeCustomMetric(ctx) } // CleanUp clean up the background goroutines responsible for consuming resources. 
-func (rc *ResourceConsumer) CleanUp() { +func (rc *ResourceConsumer) CleanUp(ctx context.Context) { ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name)) close(rc.stopCPU) close(rc.stopMem) @@ -540,24 +549,24 @@ func (rc *ResourceConsumer) CleanUp() { kind := rc.kind.GroupKind() if kind.Kind == crdKind { gvr := schema.GroupVersionResource{Group: crdGroup, Version: crdVersion, Resource: crdNamePlural} - framework.ExpectNoError(e2eresource.DeleteCustomResourceAndWaitForGC(rc.clientSet, rc.dynamicClient, rc.scaleClient, gvr, rc.nsName, rc.name)) + framework.ExpectNoError(e2eresource.DeleteCustomResourceAndWaitForGC(ctx, rc.clientSet, rc.dynamicClient, rc.scaleClient, gvr, rc.nsName, rc.name)) } else { - framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name)) + framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, rc.clientSet, kind, rc.nsName, rc.name)) } - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, metav1.DeleteOptions{})) - framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(rc.clientSet, schema.GroupKind{Kind: "ReplicationController"}, rc.nsName, rc.controllerName)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name+"-ctrl", metav1.DeleteOptions{})) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(ctx, rc.name, metav1.DeleteOptions{})) + framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, rc.clientSet, schema.GroupKind{Kind: "ReplicationController"}, rc.nsName, rc.controllerName)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(ctx, rc.name+"-ctrl", metav1.DeleteOptions{})) // Cleanup sidecar related resources if rc.sidecarStatus == Enable && rc.sidecarType == Busy { - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name+"-sidecar", metav1.DeleteOptions{})) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name+"-sidecar-ctrl", metav1.DeleteOptions{})) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(ctx, rc.name+"-sidecar", metav1.DeleteOptions{})) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(ctx, rc.name+"-sidecar-ctrl", metav1.DeleteOptions{})) } } -func createService(c clientset.Interface, name, ns string, annotations, selectors map[string]string, port int32, targetPort int) (*v1.Service, error) { - return c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ +func createService(ctx context.Context, c clientset.Interface, name, ns string, annotations, selectors map[string]string, port int32, targetPort int) (*v1.Service, error) { + return c.CoreV1().Services(ns).Create(ctx, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, Annotations: annotations, @@ -573,19 +582,19 @@ func createService(c clientset.Interface, name, ns string, annotations, selector } // runServiceAndSidecarForResourceConsumer creates service and runs resource consumer for sidecar container -func runServiceAndSidecarForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, serviceAnnotations map[string]string) { +func runServiceAndSidecarForResourceConsumer(ctx context.Context, c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, serviceAnnotations map[string]string) { ginkgo.By(fmt.Sprintf("Running consuming RC sidecar %s via %s with %v 
replicas", name, kind, replicas)) sidecarName := name + "-sidecar" serviceSelectors := map[string]string{ "name": name, } - _, err := createService(c, sidecarName, ns, serviceAnnotations, serviceSelectors, port, sidecarTargetPort) + _, err := createService(ctx, c, sidecarName, ns, serviceAnnotations, serviceSelectors, port, sidecarTargetPort) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Running controller for sidecar")) controllerName := sidecarName + "-ctrl" - _, err = createService(c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort) + _, err = createService(ctx, c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort) framework.ExpectNoError(err) dnsClusterFirst := v1.DNSClusterFirst @@ -600,15 +609,15 @@ func runServiceAndSidecarForResourceConsumer(c clientset.Interface, ns, name str DNSPolicy: &dnsClusterFirst, } - framework.ExpectNoError(e2erc.RunRC(controllerRcConfig)) + framework.ExpectNoError(e2erc.RunRC(ctx, controllerRcConfig)) // Wait for endpoints to propagate for the controller service. framework.ExpectNoError(framework.WaitForServiceEndpointsNum( - c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) + ctx, c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) } -func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, resourceClient dynamic.ResourceInterface, apiExtensionClient crdclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string, additionalContainers []v1.Container) { +func runServiceAndWorkloadForResourceConsumer(ctx context.Context, c clientset.Interface, resourceClient dynamic.ResourceInterface, apiExtensionClient crdclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string, additionalContainers []v1.Container) { ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) - _, err := createService(c, name, ns, serviceAnnotations, map[string]string{"name": name}, port, targetPort) + _, err := createService(ctx, c, name, ns, serviceAnnotations, map[string]string{"name": name}, port, targetPort) framework.ExpectNoError(err) rcConfig := testutils.RCConfig{ @@ -634,25 +643,25 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, resourceCli switch kind { case KindRC: - framework.ExpectNoError(e2erc.RunRC(rcConfig)) + framework.ExpectNoError(e2erc.RunRC(ctx, rcConfig)) case KindDeployment: ginkgo.By(fmt.Sprintf("Creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace)) - framework.ExpectNoError(testutils.RunDeployment(dpConfig)) + framework.ExpectNoError(testutils.RunDeployment(ctx, dpConfig)) case KindReplicaSet: rsConfig := testutils.ReplicaSetConfig{ RCConfig: rcConfig, } ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace)) - framework.ExpectNoError(runReplicaSet(rsConfig)) + framework.ExpectNoError(runReplicaSet(ctx, rsConfig)) case KindCRD: - crd := CreateCustomResourceDefinition(apiExtensionClient) - crdInstance, err := CreateCustomSubresourceInstance(ns, name, resourceClient, crd) + crd := CreateCustomResourceDefinition(ctx, apiExtensionClient) + crdInstance, err := CreateCustomSubresourceInstance(ctx, ns, name, resourceClient, crd) framework.ExpectNoError(err) 
ginkgo.By(fmt.Sprintf("Creating deployment %s backing CRD in namespace %s", dpConfig.Name, dpConfig.Namespace)) - framework.ExpectNoError(testutils.RunDeployment(dpConfig)) + framework.ExpectNoError(testutils.RunDeployment(ctx, dpConfig)) - deployment, err := c.AppsV1().Deployments(dpConfig.Namespace).Get(context.TODO(), dpConfig.Name, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(dpConfig.Namespace).Get(ctx, dpConfig.Name, metav1.GetOptions{}) framework.ExpectNoError(err) deployment.SetOwnerReferences([]metav1.OwnerReference{{ APIVersion: kind.GroupVersion().String(), @@ -660,7 +669,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, resourceCli Name: name, UID: crdInstance.GetUID(), }}) - _, err = c.AppsV1().Deployments(dpConfig.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}) + _, err = c.AppsV1().Deployments(dpConfig.Namespace).Update(ctx, deployment, metav1.UpdateOptions{}) framework.ExpectNoError(err) default: framework.Failf(invalidKind) @@ -668,7 +677,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, resourceCli ginkgo.By(fmt.Sprintf("Running controller")) controllerName := name + "-ctrl" - _, err = createService(c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort) + _, err = createService(ctx, c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort) framework.ExpectNoError(err) dnsClusterFirst := v1.DNSClusterFirst @@ -683,13 +692,13 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, resourceCli DNSPolicy: &dnsClusterFirst, } - framework.ExpectNoError(e2erc.RunRC(controllerRcConfig)) + framework.ExpectNoError(e2erc.RunRC(ctx, controllerRcConfig)) // Wait for endpoints to propagate for the controller service. 
framework.ExpectNoError(framework.WaitForServiceEndpointsNum( - c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) + ctx, c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) } -func CreateHorizontalPodAutoscaler(rc *ResourceConsumer, targetRef autoscalingv2.CrossVersionObjectReference, namespace string, metrics []autoscalingv2.MetricSpec, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { +func CreateHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, targetRef autoscalingv2.CrossVersionObjectReference, namespace string, metrics []autoscalingv2.MetricSpec, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { hpa := &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: targetRef.Name, @@ -702,12 +711,12 @@ func CreateHorizontalPodAutoscaler(rc *ResourceConsumer, targetRef autoscalingv2 Metrics: metrics, }, } - hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(namespace).Create(context.TODO(), hpa, metav1.CreateOptions{}) + hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(namespace).Create(ctx, hpa, metav1.CreateOptions{}) framework.ExpectNoError(errHPA) return hpa } -func CreateResourceHorizontalPodAutoscaler(rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { +func CreateResourceHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { targetRef := autoscalingv2.CrossVersionObjectReference{ APIVersion: rc.kind.GroupVersion().String(), Kind: rc.kind.Kind, @@ -722,27 +731,27 @@ func CreateResourceHorizontalPodAutoscaler(rc *ResourceConsumer, resourceType v1 }, }, } - return CreateHorizontalPodAutoscaler(rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas) + return CreateHorizontalPodAutoscaler(ctx, rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas) } -func CreateCPUResourceHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { - return CreateResourceHorizontalPodAutoscaler(rc, v1.ResourceCPU, autoscalingv2.UtilizationMetricType, cpu, minReplicas, maxReplicas) +func CreateCPUResourceHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { + return CreateResourceHorizontalPodAutoscaler(ctx, rc, v1.ResourceCPU, autoscalingv2.UtilizationMetricType, cpu, minReplicas, maxReplicas) } // DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources. 
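The hunks above all follow the same two-step recipe: ctx becomes the first parameter of each helper, and context.TODO()/wait.PollImmediate calls become ctx/wait.PollImmediateWithContext so the helper returns as soon as the test is aborted. The following is a minimal sketch of that combined pattern, not part of this patch; the helper name, package name, and the 2-second interval are illustrative.

package e2eexample // hypothetical package, for illustration only

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForReadyReplicas is a hypothetical helper showing the shape this patch
// gives to such functions: ctx comes first, it is forwarded into client-go,
// and the poll loop is context-aware so it stops when the test is aborted.
func waitForReadyReplicas(ctx context.Context, c clientset.Interface, ns, name string, want int32, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		d, err := c.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return d.Status.ReadyReplicas == want, nil
	})
}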
-func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) { - rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{}) +func DeleteHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, autoscalerName string) { + framework.ExpectNoError(rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(ctx, autoscalerName, metav1.DeleteOptions{})) } // runReplicaSet launches (and verifies correctness) of a replicaset. -func runReplicaSet(config testutils.ReplicaSetConfig) error { +func runReplicaSet(ctx context.Context, config testutils.ReplicaSetConfig) error { ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace)) config.NodeDumpFunc = e2edebug.DumpNodeDebugInfo config.ContainerDumpFunc = e2ekubectl.LogFailedContainers - return testutils.RunReplicaSet(config) + return testutils.RunReplicaSet(ctx, config) } -func CreateContainerResourceHorizontalPodAutoscaler(rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { +func CreateContainerResourceHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { targetRef := autoscalingv2.CrossVersionObjectReference{ APIVersion: rc.kind.GroupVersion().String(), Kind: rc.kind.Kind, @@ -758,12 +767,12 @@ func CreateContainerResourceHorizontalPodAutoscaler(rc *ResourceConsumer, resour }, }, } - return CreateHorizontalPodAutoscaler(rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas) + return CreateHorizontalPodAutoscaler(ctx, rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas) } // DeleteContainerResourceHPA delete the horizontalPodAutoscaler for consuming resources. 
-func DeleteContainerResourceHPA(rc *ResourceConsumer, autoscalerName string) { - rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{}) +func DeleteContainerResourceHPA(ctx context.Context, rc *ResourceConsumer, autoscalerName string) { + framework.ExpectNoError(rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(ctx, autoscalerName, metav1.DeleteOptions{})) } func CreateMetricTargetWithType(resourceType v1.ResourceName, targetType autoscalingv2.MetricTargetType, targetValue int32) autoscalingv2.MetricTarget { @@ -788,7 +797,7 @@ func CreateMetricTargetWithType(resourceType v1.ResourceName, targetType autosca return metricTarget } -func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu int32, minReplicas int32, maxRepl int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv2.HorizontalPodAutoscaler { +func CreateCPUHorizontalPodAutoscalerWithBehavior(ctx context.Context, rc *ResourceConsumer, cpu int32, minReplicas int32, maxRepl int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv2.HorizontalPodAutoscaler { hpa := &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: rc.name, @@ -817,7 +826,7 @@ func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu int3 Behavior: behavior, }, } - hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{}) + hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(ctx, hpa, metav1.CreateOptions{}) framework.ExpectNoError(errHPA) return hpa } @@ -890,8 +899,8 @@ func HPABehaviorWithScaleLimitedByPercentage(scalingDirection ScalingDirection, return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule) } -func DeleteHPAWithBehavior(rc *ResourceConsumer, autoscalerName string) { - rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{}) +func DeleteHPAWithBehavior(ctx context.Context, rc *ResourceConsumer, autoscalerName string) { + framework.ExpectNoError(rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(ctx, autoscalerName, metav1.DeleteOptions{})) } // SidecarStatusType type for sidecar status @@ -910,7 +919,7 @@ const ( Idle SidecarWorkloadType = "Idle" ) -func CreateCustomResourceDefinition(c crdclientset.Interface) *apiextensionsv1.CustomResourceDefinition { +func CreateCustomResourceDefinition(ctx context.Context, c crdclientset.Interface) *apiextensionsv1.CustomResourceDefinition { crdSchema := &apiextensionsv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: crdNamePlural + "." + crdGroup}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ @@ -939,12 +948,12 @@ func CreateCustomResourceDefinition(c crdclientset.Interface) *apiextensionsv1.C Status: apiextensionsv1.CustomResourceDefinitionStatus{}, } // Create Custom Resource Definition if it's not present. 
- crd, err := c.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crdSchema.Name, metav1.GetOptions{}) + crd, err := c.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crdSchema.Name, metav1.GetOptions{}) if err != nil { - crd, err = c.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crdSchema, metav1.CreateOptions{}) + crd, err = c.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crdSchema, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait until just created CRD appears in discovery. - err = wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) { + err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) { return ExistsInDiscovery(crd, c, "v1") }) framework.ExpectNoError(err) @@ -966,7 +975,7 @@ func ExistsInDiscovery(crd *apiextensionsv1.CustomResourceDefinition, apiExtensi return false, nil } -func CreateCustomSubresourceInstance(namespace, name string, client dynamic.ResourceInterface, definition *apiextensionsv1.CustomResourceDefinition) (*unstructured.Unstructured, error) { +func CreateCustomSubresourceInstance(ctx context.Context, namespace, name string, client dynamic.ResourceInterface, definition *apiextensionsv1.CustomResourceDefinition) (*unstructured.Unstructured, error) { instance := &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": crdGroup + "/" + crdVersion, @@ -985,7 +994,7 @@ func CreateCustomSubresourceInstance(namespace, name string, client dynamic.Reso }, }, } - instance, err := client.Create(context.TODO(), instance, metav1.CreateOptions{}) + instance, err := client.Create(ctx, instance, metav1.CreateOptions{}) if err != nil { framework.Logf("%#v", instance) return nil, err diff --git a/test/e2e/framework/daemonset/fixtures.go b/test/e2e/framework/daemonset/fixtures.go index 765335c0d1f..a2b99eb4c5c 100644 --- a/test/e2e/framework/daemonset/fixtures.go +++ b/test/e2e/framework/daemonset/fixtures.go @@ -62,25 +62,25 @@ func NewDaemonSet(dsName, image string, labels map[string]string, volumes []v1.V } } -func CheckRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) (bool, error) { - nodeNames := SchedulableNodes(f.ClientSet, ds) - return CheckDaemonPodOnNodes(f, ds, nodeNames)() +func CheckRunningOnAllNodes(ctx context.Context, f *framework.Framework, ds *appsv1.DaemonSet) (bool, error) { + nodeNames := SchedulableNodes(ctx, f.ClientSet, ds) + return CheckDaemonPodOnNodes(f, ds, nodeNames)(ctx) } // CheckPresentOnNodes will check that the daemonset will be present on at least the given number of // schedulable nodes. 
-func CheckPresentOnNodes(c clientset.Interface, ds *appsv1.DaemonSet, ns string, numNodes int) (bool, error) { - nodeNames := SchedulableNodes(c, ds) +func CheckPresentOnNodes(ctx context.Context, c clientset.Interface, ds *appsv1.DaemonSet, ns string, numNodes int) (bool, error) { + nodeNames := SchedulableNodes(ctx, c, ds) if len(nodeNames) < numNodes { return false, nil } - return checkDaemonPodStateOnNodes(c, ds, ns, nodeNames, func(pod *v1.Pod) bool { + return checkDaemonPodStateOnNodes(ctx, c, ds, ns, nodeNames, func(pod *v1.Pod) bool { return pod.Status.Phase != v1.PodPending }) } -func SchedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string { - nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func SchedulableNodes(ctx context.Context, c clientset.Interface, ds *appsv1.DaemonSet) []string { + nodeList, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) nodeNames := make([]string, 0) for _, node := range nodeList.Items { @@ -94,16 +94,16 @@ func SchedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string { return nodeNames } -func CheckDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNames []string) func() (bool, error) { - return func() (bool, error) { - return checkDaemonPodStateOnNodes(f.ClientSet, ds, f.Namespace.Name, nodeNames, func(pod *v1.Pod) bool { +func CheckDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNames []string) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { + return checkDaemonPodStateOnNodes(ctx, f.ClientSet, ds, f.Namespace.Name, nodeNames, func(pod *v1.Pod) bool { return podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) }) } } -func checkDaemonPodStateOnNodes(c clientset.Interface, ds *appsv1.DaemonSet, ns string, nodeNames []string, stateChecker func(*v1.Pod) bool) (bool, error) { - podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) +func checkDaemonPodStateOnNodes(ctx context.Context, c clientset.Interface, ds *appsv1.DaemonSet, ns string, nodeNames []string, stateChecker func(*v1.Pod) bool) (bool, error) { + podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("could not get the pod list: %v", err) return false, nil @@ -139,8 +139,8 @@ func checkDaemonPodStateOnNodes(c clientset.Interface, ds *appsv1.DaemonSet, ns return len(nodesToPodCount) == len(nodeNames), nil } -func CheckDaemonStatus(f *framework.Framework, dsName string) error { - ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{}) +func CheckDaemonStatus(ctx context.Context, f *framework.Framework, dsName string) error { + ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(ctx, dsName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e/framework/debug/dump.go b/test/e2e/framework/debug/dump.go index 9244bf0e00d..0f78d70eafb 100644 --- a/test/e2e/framework/debug/dump.go +++ b/test/e2e/framework/debug/dump.go @@ -58,25 +58,25 @@ func dumpEventsInNamespace(eventsLister EventsLister, namespace string) { } // DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace. 
-func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { +func DumpAllNamespaceInfo(ctx context.Context, c clientset.Interface, namespace string) { dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) { - return c.CoreV1().Events(ns).List(context.TODO(), opts) + return c.CoreV1().Events(ns).List(ctx, opts) }, namespace) - e2epod.DumpAllPodInfoForNamespace(c, namespace, framework.TestContext.ReportDir) + e2epod.DumpAllPodInfoForNamespace(ctx, c, namespace, framework.TestContext.ReportDir) // If cluster is large, then the following logs are basically useless, because: // 1. it takes tens of minutes or hours to grab all of them // 2. there are so many of them that working with them are mostly impossible // So we dump them only if the cluster is relatively small. maxNodesForDump := framework.TestContext.MaxNodesToGather - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("unable to fetch node list: %v", err) return } if len(nodes.Items) <= maxNodesForDump { - dumpAllNodeInfo(c, nodes) + dumpAllNodeInfo(ctx, c, nodes) } else { framework.Logf("skipping dumping cluster info - cluster too large") } @@ -95,31 +95,31 @@ func (o byFirstTimestamp) Less(i, j int) bool { return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp) } -func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) { +func dumpAllNodeInfo(ctx context.Context, c clientset.Interface, nodes *v1.NodeList) { names := make([]string, len(nodes.Items)) for ix := range nodes.Items { names[ix] = nodes.Items[ix].Name } - DumpNodeDebugInfo(c, names, framework.Logf) + DumpNodeDebugInfo(ctx, c, names, framework.Logf) } // DumpNodeDebugInfo dumps debug information of the given nodes. -func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) { +func DumpNodeDebugInfo(ctx context.Context, c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) { for _, n := range nodeNames { logFunc("\nLogging node info for node %v", n) - node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(ctx, n, metav1.GetOptions{}) if err != nil { logFunc("Error getting node info %v", err) } logFunc("Node Info: %v", node) logFunc("\nLogging kubelet events for node %v", n) - for _, e := range getNodeEvents(c, n) { + for _, e := range getNodeEvents(ctx, c, n) { logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v", e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject) } logFunc("\nLogging pods the kubelet thinks is on node %v", n) - podList, err := getKubeletPods(c, n) + podList, err := getKubeletPods(ctx, c, n) if err != nil { logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err) continue @@ -135,13 +135,14 @@ func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(f c.Name, c.Ready, c.RestartCount) } } - e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc) + _, err = e2emetrics.HighLatencyKubeletOperations(ctx, c, 10*time.Second, n, logFunc) + framework.ExpectNoError(err) // TODO: Log node resource info } } // getKubeletPods retrieves the list of pods on the kubelet. 
-func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) { +func getKubeletPods(ctx context.Context, c clientset.Interface, node string) (*v1.PodList, error) { var client restclient.Result finished := make(chan struct{}, 1) go func() { @@ -151,7 +152,7 @@ func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) { SubResource("proxy"). Name(fmt.Sprintf("%v:%v", node, framework.KubeletPort)). Suffix("pods"). - Do(context.TODO()) + Do(ctx) finished <- struct{}{} }() @@ -170,7 +171,7 @@ func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) { // logNodeEvents logs kubelet events from the given node. This includes kubelet // restart and node unhealthy events. Note that listing events like this will mess // with latency metrics, beware of calling it during a test. -func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event { +func getNodeEvents(ctx context.Context, c clientset.Interface, nodeName string) []v1.Event { selector := fields.Set{ "involvedObject.kind": "Node", "involvedObject.name": nodeName, @@ -178,7 +179,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event { "source": "kubelet", }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options) + events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(ctx, options) if err != nil { framework.Logf("Unexpected error retrieving node events %v", err) return []v1.Event{} diff --git a/test/e2e/framework/debug/init/init.go b/test/e2e/framework/debug/init/init.go index 769db07c5c2..adfcb3242a3 100644 --- a/test/e2e/framework/debug/init/init.go +++ b/test/e2e/framework/debug/init/init.go @@ -19,6 +19,7 @@ limitations under the License. 
package init import ( + "context" "sync" "time" @@ -30,28 +31,23 @@ import ( func init() { framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions, func(f *framework.Framework) { - f.DumpAllNamespaceInfo = func(f *framework.Framework, ns string) { - e2edebug.DumpAllNamespaceInfo(f.ClientSet, ns) + f.DumpAllNamespaceInfo = func(ctx context.Context, f *framework.Framework, ns string) { + e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, ns) } if framework.TestContext.GatherLogsSizes { - var ( - wg sync.WaitGroup - closeChannel chan bool - verifier *e2edebug.LogsSizeVerifier - ) - ginkgo.BeforeEach(func() { + var wg sync.WaitGroup wg.Add(1) - closeChannel = make(chan bool) - verifier = e2edebug.NewLogsVerifier(f.ClientSet, closeChannel) + ctx, cancel := context.WithCancel(context.Background()) + verifier := e2edebug.NewLogsVerifier(ctx, f.ClientSet) go func() { defer wg.Done() - verifier.Run() + verifier.Run(ctx) }() ginkgo.DeferCleanup(func() { ginkgo.By("Gathering log sizes data", func() { - close(closeChannel) + cancel() wg.Wait() f.TestSummaries = append(f.TestSummaries, verifier.GetSummary()) }) @@ -61,7 +57,7 @@ func init() { if framework.TestContext.GatherKubeSystemResourceUsageData != "false" && framework.TestContext.GatherKubeSystemResourceUsageData != "none" { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { var nodeMode e2edebug.NodesSet switch framework.TestContext.GatherKubeSystemResourceUsageData { case "master": @@ -72,7 +68,7 @@ func init() { nodeMode = e2edebug.AllNodes } - gatherer, err := e2edebug.NewResourceUsageGatherer(f.ClientSet, e2edebug.ResourceGathererOptions{ + gatherer, err := e2edebug.NewResourceUsageGatherer(ctx, f.ClientSet, e2edebug.ResourceGathererOptions{ InKubemark: framework.ProviderIs("kubemark"), Nodes: nodeMode, ResourceDataGatheringPeriod: 60 * time.Second, @@ -84,7 +80,7 @@ func init() { return } - go gatherer.StartGatheringData() + go gatherer.StartGatheringData(ctx) ginkgo.DeferCleanup(func() { ginkgo.By("Collecting resource usage data", func() { summary, resourceViolationError := gatherer.StopAndSummarize([]int{90, 99, 100}, nil /* no constraints */) diff --git a/test/e2e/framework/debug/log_size_monitoring.go b/test/e2e/framework/debug/log_size_monitoring.go index b5f53a208fb..073cbe891af 100644 --- a/test/e2e/framework/debug/log_size_monitoring.go +++ b/test/e2e/framework/debug/log_size_monitoring.go @@ -18,6 +18,7 @@ package debug import ( "bytes" + "context" "fmt" "strconv" "strings" @@ -156,8 +157,8 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int } // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed -func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier { - nodeAddresses, err := e2essh.NodeSSHHosts(c) +func NewLogsVerifier(ctx context.Context, c clientset.Interface) *LogsSizeVerifier { + nodeAddresses, err := e2essh.NodeSSHHosts(ctx, c) framework.ExpectNoError(err) instanceAddress := framework.APIAddress() + ":22" @@ -166,7 +167,6 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri verifier := &LogsSizeVerifier{ client: c, - stopChannel: stopChannel, data: prepareData(instanceAddress, nodeAddresses), masterAddress: instanceAddress, nodeAddresses: nodeAddresses, @@ -177,7 +177,6 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri verifier.wg.Add(workersNo) for i := 0; i < workersNo; i++ { workers[i] = &LogSizeGatherer{ - 
stopChannel: stopChannel, data: verifier.data, wg: &verifier.wg, workChannel: workChannel, @@ -207,7 +206,7 @@ func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary { } // Run starts log size gathering. It starts a gorouting for every worker and then blocks until stopChannel is closed -func (s *LogsSizeVerifier) Run() { +func (s *LogsSizeVerifier) Run(ctx context.Context) { s.workChannel <- WorkItem{ ip: s.masterAddress, paths: masterLogsToCheck, @@ -221,15 +220,15 @@ func (s *LogsSizeVerifier) Run() { } } for _, worker := range s.workers { - go worker.Run() + go worker.Run(ctx) } <-s.stopChannel s.wg.Wait() } // Run starts log size gathering. -func (g *LogSizeGatherer) Run() { - for g.Work() { +func (g *LogSizeGatherer) Run(ctx context.Context) { + for g.Work(ctx) { } } @@ -245,7 +244,7 @@ func (g *LogSizeGatherer) pushWorkItem(workItem WorkItem) { // Work does a single unit of work: tries to take out a WorkItem from the queue, ssh-es into a given machine, // gathers data, writes it to the shared map, and creates a gorouting which reinserts work item into // the queue with a delay. Returns false if worker should exit. -func (g *LogSizeGatherer) Work() bool { +func (g *LogSizeGatherer) Work(ctx context.Context) bool { var workItem WorkItem select { case <-g.stopChannel: @@ -254,6 +253,7 @@ func (g *LogSizeGatherer) Work() bool { case workItem = <-g.workChannel: } sshResult, err := e2essh.SSH( + ctx, fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")), workItem.ip, framework.TestContext.Provider, diff --git a/test/e2e/framework/debug/resource_usage_gatherer.go b/test/e2e/framework/debug/resource_usage_gatherer.go index f401cac27e0..9c6537ed9d4 100644 --- a/test/e2e/framework/debug/resource_usage_gatherer.go +++ b/test/e2e/framework/debug/resource_usage_gatherer.go @@ -181,10 +181,10 @@ type resourceGatherWorker struct { printVerboseLogs bool } -func (w *resourceGatherWorker) singleProbe() { +func (w *resourceGatherWorker) singleProbe(ctx context.Context) { data := make(ResourceUsagePerContainer) if w.inKubemark { - kubemarkData := getKubemarkMasterComponentsResourceUsage() + kubemarkData := getKubemarkMasterComponentsResourceUsage(ctx) if kubemarkData == nil { return } @@ -319,22 +319,26 @@ func removeUint64Ptr(ptr *uint64) uint64 { return *ptr } -func (w *resourceGatherWorker) gather(initialSleep time.Duration) { +func (w *resourceGatherWorker) gather(ctx context.Context, initialSleep time.Duration) { defer utilruntime.HandleCrash() defer w.wg.Done() defer framework.Logf("Closing worker for %v", w.nodeName) defer func() { w.finished = true }() select { case <-time.After(initialSleep): - w.singleProbe() + w.singleProbe(ctx) for { select { case <-time.After(w.resourceDataGatheringPeriod): - w.singleProbe() + w.singleProbe(ctx) + case <-ctx.Done(): + return case <-w.stopCh: return } } + case <-ctx.Done(): + return case <-w.stopCh: return } @@ -373,11 +377,11 @@ const ( // nodeHasControlPlanePods returns true if specified node has control plane pods // (kube-scheduler and/or kube-controller-manager). 
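The resource gatherer above keeps its existing stop channel but adds a ctx.Done() case, so a worker exits on whichever signal arrives first. A minimal sketch of that select loop follows; the names (gatherLoop, probe) are hypothetical and only illustrate the pattern.

package e2eexample // hypothetical package, for illustration only

import (
	"context"
	"time"
)

// gatherLoop is a hypothetical worker loop mirroring the gatherer above:
// it probes periodically and exits on either context cancellation or the
// pre-existing stop channel, whichever fires first.
func gatherLoop(ctx context.Context, stopCh <-chan struct{}, period time.Duration, probe func(context.Context)) {
	for {
		select {
		case <-time.After(period):
			probe(ctx)
		case <-ctx.Done():
			return
		case <-stopCh:
			return
		}
	}
}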
-func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, error) { +func nodeHasControlPlanePods(ctx context.Context, c clientset.Interface, nodeName string) (bool, error) { regKubeScheduler := regexp.MustCompile("kube-scheduler-.*") regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*") - podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{ + podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(), }) if err != nil { @@ -395,7 +399,7 @@ func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, erro } // NewResourceUsageGatherer returns a new ContainerResourceGatherer. -func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) { +func NewResourceUsageGatherer(ctx context.Context, c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) { g := ContainerResourceGatherer{ client: c, stopCh: make(chan struct{}), @@ -420,7 +424,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt // Tracks kube-system pods if no valid PodList is passed in. var err error if pods == nil { - pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{}) + pods, err = c.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Error while listing Pods: %v", err) return nil, err @@ -429,7 +433,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt dnsNodes := make(map[string]bool) for _, pod := range pods.Items { if options.Nodes == MasterNodes { - isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName) + isControlPlane, err := nodeHasControlPlanePods(ctx, c, pod.Spec.NodeName) if err != nil { return nil, err } @@ -438,7 +442,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt } } if options.Nodes == MasterAndDNSNodes { - isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName) + isControlPlane, err := nodeHasControlPlanePods(ctx, c, pod.Spec.NodeName) if err != nil { return nil, err } @@ -456,14 +460,14 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt dnsNodes[pod.Spec.NodeName] = true } } - nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Error while listing Nodes: %v", err) return nil, err } for _, node := range nodeList.Items { - isControlPlane, err := nodeHasControlPlanePods(c, node.Name) + isControlPlane, err := nodeHasControlPlanePods(ctx, c, node.Name) if err != nil { return nil, err } @@ -491,14 +495,14 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt // StartGatheringData starts a stat gathering worker blocks for each node to track, // and blocks until StopAndSummarize is called. 
-func (g *ContainerResourceGatherer) StartGatheringData() { +func (g *ContainerResourceGatherer) StartGatheringData(ctx context.Context) { if len(g.workers) == 0 { return } delayPeriod := g.options.ResourceDataGatheringPeriod / time.Duration(len(g.workers)) delay := time.Duration(0) for i := range g.workers { - go g.workers[i].gather(delay) + go g.workers[i].gather(ctx, delay) delay += delayPeriod } g.workerWg.Wait() @@ -603,8 +607,8 @@ type kubemarkResourceUsage struct { CPUUsageInCores float64 } -func getMasterUsageByPrefix(prefix string) (string, error) { - sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider) +func getMasterUsageByPrefix(ctx context.Context, prefix string) (string, error) { + sshResult, err := e2essh.SSH(ctx, fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider) if err != nil { return "", err } @@ -612,10 +616,10 @@ func getMasterUsageByPrefix(prefix string) (string, error) { } // getKubemarkMasterComponentsResourceUsage returns the resource usage of kubemark which contains multiple combinations of cpu and memory usage for each pod name. -func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsage { +func getKubemarkMasterComponentsResourceUsage(ctx context.Context) map[string]*kubemarkResourceUsage { result := make(map[string]*kubemarkResourceUsage) // Get kubernetes component resource usage - sshResult, err := getMasterUsageByPrefix("kube") + sshResult, err := getMasterUsageByPrefix(ctx, "kube") if err != nil { framework.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err) return nil @@ -633,7 +637,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag } } // Get etcd resource usage - sshResult, err = getMasterUsageByPrefix("bin/etcd") + sshResult, err = getMasterUsageByPrefix(ctx, "bin/etcd") if err != nil { framework.Logf("Error when trying to SSH to master machine. Skipping probe") return nil diff --git a/test/e2e/framework/deployment/fixtures.go b/test/e2e/framework/deployment/fixtures.go index a6118534445..462bbc6ea08 100644 --- a/test/e2e/framework/deployment/fixtures.go +++ b/test/e2e/framework/deployment/fixtures.go @@ -71,9 +71,9 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s } // CreateDeployment creates a deployment. 
-func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) { +func CreateDeployment(ctx context.Context, client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) { deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command) - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) } @@ -86,14 +86,14 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[ } // GetPodsForDeployment gets pods for the given deployment -func GetPodsForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (*v1.PodList, error) { +func GetPodsForDeployment(ctx context.Context, client clientset.Interface, deployment *appsv1.Deployment) (*v1.PodList, error) { replicaSetSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) if err != nil { return nil, err } replicaSetListOptions := metav1.ListOptions{LabelSelector: replicaSetSelector.String()} - allReplicaSets, err := client.AppsV1().ReplicaSets(deployment.Namespace).List(context.TODO(), replicaSetListOptions) + allReplicaSets, err := client.AppsV1().ReplicaSets(deployment.Namespace).List(ctx, replicaSetListOptions) if err != nil { return nil, err } @@ -144,7 +144,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *appsv1.Deploym return nil, err } podListOptions := metav1.ListOptions{LabelSelector: podSelector.String()} - allPods, err := client.CoreV1().Pods(deployment.Namespace).List(context.TODO(), podListOptions) + allPods, err := client.CoreV1().Pods(deployment.Namespace).List(ctx, podListOptions) if err != nil { return nil, err } diff --git a/test/e2e/framework/events/events.go b/test/e2e/framework/events/events.go index 39d70209957..80b931c2837 100644 --- a/test/e2e/framework/events/events.go +++ b/test/e2e/framework/events/events.go @@ -32,15 +32,15 @@ type Action func() error // WaitTimeoutForEvent waits the given timeout duration for an event to occur. // Please note delivery of events is not guaranteed. Asserting on events can lead to flaky tests. 
-func WaitTimeoutForEvent(c clientset.Interface, namespace, eventSelector, msg string, timeout time.Duration) error { +func WaitTimeoutForEvent(ctx context.Context, c clientset.Interface, namespace, eventSelector, msg string, timeout time.Duration) error { interval := 2 * time.Second - return wait.PollImmediate(interval, timeout, eventOccurred(c, namespace, eventSelector, msg)) + return wait.PollImmediateWithContext(ctx, interval, timeout, eventOccurred(c, namespace, eventSelector, msg)) } -func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) wait.ConditionFunc { +func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) wait.ConditionWithContextFunc { options := metav1.ListOptions{FieldSelector: eventSelector} - return func() (bool, error) { - events, err := c.CoreV1().Events(namespace).List(context.TODO(), options) + return func(ctx context.Context) (bool, error) { + events, err := c.CoreV1().Events(namespace).List(ctx, options) if err != nil { return false, fmt.Errorf("got error while getting events: %v", err) } diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index ddbbcb195c4..c8ba3aa0879 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -128,7 +128,7 @@ type Framework struct { // DumpAllNamespaceInfoAction is called after each failed test for namespaces // created for the test. -type DumpAllNamespaceInfoAction func(f *Framework, namespace string) +type DumpAllNamespaceInfoAction func(ctx context.Context, f *Framework, namespace string) // TestDataSummary is an interface for managing test data. type TestDataSummary interface { @@ -184,7 +184,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface) } // BeforeEach gets a client and makes a namespace. -func (f *Framework) BeforeEach() { +func (f *Framework) BeforeEach(ctx context.Context) { // DeferCleanup, in contrast to AfterEach, triggers execution in // first-in-last-out order. This ensures that the framework instance // remains valid as long as possible. 
@@ -235,7 +235,7 @@ func (f *Framework) BeforeEach() { if !f.SkipNamespaceCreation { ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName)) - namespace, err := f.CreateNamespace(f.BaseName, map[string]string{ + namespace, err := f.CreateNamespace(ctx, f.BaseName, map[string]string{ "e2e-framework": f.BaseName, }) ExpectNoError(err) @@ -244,10 +244,10 @@ func (f *Framework) BeforeEach() { if TestContext.VerifyServiceAccount { ginkgo.By("Waiting for a default service account to be provisioned in namespace") - err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) + err = WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name) ExpectNoError(err) ginkgo.By("Waiting for kube-root-ca.crt to be provisioned in namespace") - err = WaitForKubeRootCAInNamespace(f.ClientSet, namespace.Name) + err = WaitForKubeRootCAInNamespace(ctx, f.ClientSet, namespace.Name) ExpectNoError(err) } else { Logf("Skipping waiting for service account") @@ -261,7 +261,7 @@ func (f *Framework) BeforeEach() { f.flakeReport = NewFlakeReport() } -func (f *Framework) dumpNamespaceInfo() { +func (f *Framework) dumpNamespaceInfo(ctx context.Context) { if !ginkgo.CurrentSpecReport().Failed() { return } @@ -274,7 +274,7 @@ func (f *Framework) dumpNamespaceInfo() { ginkgo.By("dump namespace information after failure", func() { if !f.SkipNamespaceCreation { for _, ns := range f.namespacesToDelete { - f.DumpAllNamespaceInfo(f, ns.Name) + f.DumpAllNamespaceInfo(ctx, f, ns.Name) } } }) @@ -318,7 +318,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) { } // AfterEach deletes the namespace, after reading its events. -func (f *Framework) AfterEach() { +func (f *Framework) AfterEach(ctx context.Context) { // This should not happen. Given ClientSet is a public field a test must have updated it! // Error out early before any API calls during cleanup. if f.ClientSet == nil { @@ -335,13 +335,13 @@ func (f *Framework) AfterEach() { if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentSpecReport().Failed()) { for _, ns := range f.namespacesToDelete { ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) - if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil { + if err := f.ClientSet.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{}); err != nil { if !apierrors.IsNotFound(err) { nsDeletionErrors[ns.Name] = err // Dump namespace if we are unable to delete the namespace and the dump was not already performed. if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil { - f.DumpAllNamespaceInfo(f, ns.Name) + f.DumpAllNamespaceInfo(ctx, f, ns.Name) } } else { Logf("Namespace %v was already deleted", ns.Name) @@ -388,14 +388,14 @@ func (f *Framework) AfterEach() { // DeleteNamespace can be used to delete a namespace. Additionally it can be used to // dump namespace information so as it can be used as an alternative of framework // deleting the namespace towards the end. 
-func (f *Framework) DeleteNamespace(name string) { +func (f *Framework) DeleteNamespace(ctx context.Context, name string) { defer func() { - err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := f.ClientSet.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { Logf("error deleting namespace %s: %v", name, err) return } - err = WaitForNamespacesDeleted(f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout) + err = WaitForNamespacesDeleted(ctx, f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout) if err != nil { Logf("error deleting namespace %s: %v", name, err) return @@ -412,13 +412,13 @@ func (f *Framework) DeleteNamespace(name string) { }() // if current test failed then we should dump namespace information if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil { - f.DumpAllNamespaceInfo(f, name) + f.DumpAllNamespaceInfo(ctx, f, name) } } // CreateNamespace creates a namespace for e2e testing. -func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) { +func (f *Framework) CreateNamespace(ctx context.Context, baseName string, labels map[string]string) (*v1.Namespace, error) { createTestingNS := TestContext.CreateTestingNS if createTestingNS == nil { createTestingNS = CreateTestingNS @@ -440,7 +440,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) ( } labels[admissionapi.EnforceLevelLabel] = string(enforceLevel) - ns, err := createTestingNS(baseName, f.ClientSet, labels) + ns, err := createTestingNS(ctx, baseName, f.ClientSet, labels) // check ns instead of err to see if it's nil as we may // fail to create serviceAccount in it. f.AddNamespacesToDelete(ns) @@ -599,7 +599,7 @@ func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool { } // filterLabels returns a list of pods which have labels. -func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) { +func filterLabels(ctx context.Context, selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) { var err error var selector labels.Selector var pl *v1.PodList @@ -608,9 +608,9 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin if len(selectors) > 0 { selector = labels.SelectorFromSet(labels.Set(selectors)) options := metav1.ListOptions{LabelSelector: selector.String()} - pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), options) + pl, err = cli.CoreV1().Pods(ns).List(ctx, options) } else { - pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + pl, err = cli.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) } return pl, err } @@ -618,13 +618,13 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin // filter filters pods which pass a filter. It can be used to compose // the more useful abstractions like ForEach, WaitFor, and so on, which // can be used directly by tests. -func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) { +func (p *PodStateVerification) filter(ctx context.Context, c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) { if len(p.ValidPhases) == 0 || namespace == nil { panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). 
", p.ValidPhases, namespace)) } ns := namespace.Name - pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against. + pl, err := filterLabels(ctx, p.Selectors, c, ns) // Build an v1.PodList to operate against. Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors) if len(pl.Items) == 0 || err != nil { return pl.Items, err @@ -652,12 +652,12 @@ ReturnPodsSoFar: // WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification // definition. -func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) { +func (cl *ClusterVerification) WaitFor(ctx context.Context, atLeast int, timeout time.Duration) ([]v1.Pod, error) { pods := []v1.Pod{} var returnedErr error - err := wait.Poll(1*time.Second, timeout, func() (bool, error) { - pods, returnedErr = cl.podState.filter(cl.client, cl.namespace) + err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) { + pods, returnedErr = cl.podState.filter(ctx, cl.client, cl.namespace) // Failure if returnedErr != nil { @@ -680,8 +680,8 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1 } // WaitForOrFail provides a shorthand WaitFor with failure as an option if anything goes wrong. -func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) { - pods, err := cl.WaitFor(atLeast, timeout) +func (cl *ClusterVerification) WaitForOrFail(ctx context.Context, atLeast int, timeout time.Duration) { + pods, err := cl.WaitFor(ctx, atLeast, timeout) if err != nil || len(pods) < atLeast { Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err) } @@ -692,8 +692,8 @@ func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) // // For example, if you require at least 5 pods to be running before your test will pass, // its smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach. -func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error { - pods, err := cl.podState.filter(cl.client, cl.namespace) +func (cl *ClusterVerification) ForEach(ctx context.Context, podFunc func(v1.Pod)) error { + pods, err := cl.podState.filter(ctx, cl.client, cl.namespace) if err == nil { if len(pods) == 0 { Failf("No pods matched the filter.") diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index f138c14868c..06daf645e9b 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -169,8 +169,8 @@ type NegStatus struct { } // SimpleGET executes a get on the given url, returns error if non-200 returned. -func SimpleGET(c *http.Client, url, host string) (string, error) { - req, err := http.NewRequest("GET", url, nil) +func SimpleGET(ctx context.Context, c *http.Client, url, host string) (string, error) { + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return "", err } @@ -194,11 +194,11 @@ func SimpleGET(c *http.Client, url, host string) (string, error) { // PollURL polls till the url responds with a healthy http code. If // expectUnreachable is true, it breaks on first non-healthy http code instead. 
-func PollURL(route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error { +func PollURL(ctx context.Context, route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error { var lastBody string - pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) { + pollErr := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { var err error - lastBody, err = SimpleGET(httpClient, route, host) + lastBody, err = SimpleGET(ctx, httpClient, route, host) if err != nil { framework.Logf("host %v path %v: %v unreachable", host, route, err) return expectUnreachable, nil @@ -216,7 +216,7 @@ func PollURL(route, host string, timeout time.Duration, interval time.Duration, // CreateIngressComformanceTests generates an slice of sequential test cases: // a simple http ingress, ingress with HTTPS, ingress HTTPS with a modified hostname, // ingress https with a modified URLMap -func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[string]string) []ConformanceTests { +func CreateIngressComformanceTests(ctx context.Context, jig *TestJig, ns string, annotations map[string]string) []ConformanceTests { manifestPath := filepath.Join(IngressManifestPath, "http") // These constants match the manifests used in IngressManifestPath tlsHost := "foo.bar.com" @@ -229,19 +229,19 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri tests := []ConformanceTests{ { fmt.Sprintf("should create a basic HTTP ingress"), - func() { jig.CreateIngress(manifestPath, ns, annotations, annotations) }, + func() { jig.CreateIngress(ctx, manifestPath, ns, annotations, annotations) }, fmt.Sprintf("waiting for urls on basic HTTP ingress"), }, { fmt.Sprintf("should terminate TLS for host %v", tlsHost), - func() { jig.SetHTTPS(tlsSecretName, tlsHost) }, + func() { jig.SetHTTPS(ctx, tlsSecretName, tlsHost) }, fmt.Sprintf("waiting for HTTPS updates to reflect in ingress"), }, { fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath), func() { var pathToFail string - jig.Update(func(ing *networkingv1.Ingress) { + jig.Update(ctx, func(ing *networkingv1.Ingress) { newRules := []networkingv1.IngressRule{} for _, rule := range ing.Spec.Rules { if rule.Host != updateURLMapHost { @@ -269,7 +269,7 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri }) ginkgo.By("Checking that " + pathToFail + " is not exposed by polling for failure") route := fmt.Sprintf("http://%v%v", jig.Address, pathToFail) - framework.ExpectNoError(PollURL(route, updateURLMapHost, e2eservice.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true)) + framework.ExpectNoError(PollURL(ctx, route, updateURLMapHost, e2eservice.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true)) }, fmt.Sprintf("Waiting for path updates to reflect in L7"), }, @@ -279,7 +279,7 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri tests = append(tests, ConformanceTests{ fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost), func() { - jig.Update(func(ing *networkingv1.Ingress) { + jig.Update(ctx, func(ing *networkingv1.Ingress) { newRules := []networkingv1.IngressRule{} for _, rule := range ing.Spec.Rules { if rule.Host != tlsHost { @@ -293,7 +293,7 @@ func 
CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri } ing.Spec.Rules = newRules }) - jig.SetHTTPS(tlsSecretName, updatedTLSHost) + jig.SetHTTPS(ctx, tlsSecretName, updatedTLSHost) }, fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost), }) @@ -388,7 +388,7 @@ func BuildInsecureClient(timeout time.Duration) *http.Client { // createTLSSecret creates a secret containing TLS certificates. // If a secret with the same name already pathExists in the namespace of the // Ingress, it's updated. -func createTLSSecret(kubeClient clientset.Interface, namespace, secretName string, hosts ...string) (host string, rootCA, privKey []byte, err error) { +func createTLSSecret(ctx context.Context, kubeClient clientset.Interface, namespace, secretName string, hosts ...string) (host string, rootCA, privKey []byte, err error) { host = strings.Join(hosts, ",") framework.Logf("Generating RSA cert for host %v", host) cert, key, err := GenerateRSACerts(host, true) @@ -405,13 +405,13 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin }, } var s *v1.Secret - if s, err = kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{}); err == nil { + if s, err = kubeClient.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}); err == nil { framework.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host) s.Data = secret.Data - _, err = kubeClient.CoreV1().Secrets(namespace).Update(context.TODO(), s, metav1.UpdateOptions{}) + _, err = kubeClient.CoreV1().Secrets(namespace).Update(ctx, s, metav1.UpdateOptions{}) } else { framework.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host) - _, err = kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = kubeClient.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) } return host, cert, key, err } @@ -448,7 +448,7 @@ func NewIngressTestJig(c clientset.Interface) *TestJig { // Optional: secret.yaml, ingAnnotations // If ingAnnotations is specified it will overwrite any annotations in ing.yaml // If svcAnnotations is specified it will overwrite any annotations in svc.yaml -func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[string]string, svcAnnotations map[string]string) { +func (j *TestJig) CreateIngress(ctx context.Context, manifestPath, ns string, ingAnnotations map[string]string, svcAnnotations map[string]string) { var err error read := func(file string) string { data, err := e2etestfiles.Read(filepath.Join(manifestPath, file)) @@ -471,11 +471,11 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri j.Logger.Infof("creating service") e2ekubectl.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-") if len(svcAnnotations) > 0 { - svcList, err := j.Client.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err := j.Client.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations = svcAnnotations - _, err = j.Client.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) + _, err = j.Client.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } } @@ -493,7 +493,7 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri j.Ingress.Spec.IngressClassName = &j.Class } 
j.Logger.Infof("creating %v ingress", j.Ingress.Name) - j.Ingress, err = j.runCreate(j.Ingress) + j.Ingress, err = j.runCreate(ctx, j.Ingress) framework.ExpectNoError(err) } @@ -542,9 +542,9 @@ func ingressToManifest(ing *networkingv1.Ingress, path string) error { } // runCreate runs the required command to create the given ingress. -func (j *TestJig) runCreate(ing *networkingv1.Ingress) (*networkingv1.Ingress, error) { +func (j *TestJig) runCreate(ctx context.Context, ing *networkingv1.Ingress) (*networkingv1.Ingress, error) { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1().Ingresses(ing.Namespace).Create(context.TODO(), ing, metav1.CreateOptions{}) + return j.Client.NetworkingV1().Ingresses(ing.Namespace).Create(ctx, ing, metav1.CreateOptions{}) } // Use kubemci to create a multicluster ingress. filePath := framework.TestContext.OutputDir + "/mci.yaml" @@ -556,9 +556,9 @@ func (j *TestJig) runCreate(ing *networkingv1.Ingress) (*networkingv1.Ingress, e } // runUpdate runs the required command to update the given ingress. -func (j *TestJig) runUpdate(ing *networkingv1.Ingress) (*networkingv1.Ingress, error) { +func (j *TestJig) runUpdate(ctx context.Context, ing *networkingv1.Ingress) (*networkingv1.Ingress, error) { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1().Ingresses(ing.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{}) + return j.Client.NetworkingV1().Ingresses(ing.Namespace).Update(ctx, ing, metav1.UpdateOptions{}) } // Use kubemci to update a multicluster ingress. // kubemci does not have an update command. We use "create --force" to update an existing ingress. @@ -579,16 +579,16 @@ func DescribeIng(ns string) { } // Update retrieves the ingress, performs the passed function, and then updates it. -func (j *TestJig) Update(update func(ing *networkingv1.Ingress)) { +func (j *TestJig) Update(ctx context.Context, update func(ing *networkingv1.Ingress)) { var err error ns, name := j.Ingress.Namespace, j.Ingress.Name for i := 0; i < 3; i++ { - j.Ingress, err = j.Client.NetworkingV1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{}) + j.Ingress, err = j.Client.NetworkingV1().Ingresses(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { framework.Failf("failed to get ingress %s/%s: %v", ns, name, err) } update(j.Ingress) - j.Ingress, err = j.runUpdate(j.Ingress) + j.Ingress, err = j.runUpdate(ctx, j.Ingress) if err == nil { DescribeIng(j.Ingress.Namespace) return @@ -601,24 +601,24 @@ func (j *TestJig) Update(update func(ing *networkingv1.Ingress)) { } // AddHTTPS updates the ingress to add this secret for these hosts. -func (j *TestJig) AddHTTPS(secretName string, hosts ...string) { +func (j *TestJig) AddHTTPS(ctx context.Context, secretName string, hosts ...string) { // TODO: Just create the secret in GetRootCAs once we're watching secrets in // the ingress controller. - _, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...) + _, cert, _, err := createTLSSecret(ctx, j.Client, j.Ingress.Namespace, secretName, hosts...) framework.ExpectNoError(err) j.Logger.Infof("Updating ingress %v to also use secret %v for TLS termination", j.Ingress.Name, secretName) - j.Update(func(ing *networkingv1.Ingress) { + j.Update(ctx, func(ing *networkingv1.Ingress) { ing.Spec.TLS = append(ing.Spec.TLS, networkingv1.IngressTLS{Hosts: hosts, SecretName: secretName}) }) j.RootCAs[secretName] = cert } // SetHTTPS updates the ingress to use only this secret for these hosts. 
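The PollURL and TestJig hunks above all apply the same mechanical conversion: the helper gains a ctx parameter, wait.PollImmediate becomes wait.PollImmediateWithContext, and the condition closure takes the context so that API calls made inside it are aborted together with the test. A minimal self-contained sketch of that conversion, with illustrative names that are not part of this patch:

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForServiceReady polls until the named service exists. The context comes
// from the caller (ultimately from Ginkgo), so aborting the test cancels both
// the polling loop and the in-flight Get.
func waitForServiceReady(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		_, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Keep polling on errors; a canceled context ends the loop.
			return false, nil
		}
		return true, nil
	})
}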
-func (j *TestJig) SetHTTPS(secretName string, hosts ...string) { - _, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...) +func (j *TestJig) SetHTTPS(ctx context.Context, secretName string, hosts ...string) { + _, cert, _, err := createTLSSecret(ctx, j.Client, j.Ingress.Namespace, secretName, hosts...) framework.ExpectNoError(err) j.Logger.Infof("Updating ingress %v to only use secret %v for TLS termination", j.Ingress.Name, secretName) - j.Update(func(ing *networkingv1.Ingress) { + j.Update(ctx, func(ing *networkingv1.Ingress) { ing.Spec.TLS = []networkingv1.IngressTLS{{Hosts: hosts, SecretName: secretName}} }) j.RootCAs = map[string][]byte{secretName: cert} @@ -626,7 +626,7 @@ func (j *TestJig) SetHTTPS(secretName string, hosts ...string) { // RemoveHTTPS updates the ingress to not use this secret for TLS. // Note: Does not delete the secret. -func (j *TestJig) RemoveHTTPS(secretName string) { +func (j *TestJig) RemoveHTTPS(ctx context.Context, secretName string) { newTLS := []networkingv1.IngressTLS{} for _, ingressTLS := range j.Ingress.Spec.TLS { if secretName != ingressTLS.SecretName { @@ -634,15 +634,15 @@ func (j *TestJig) RemoveHTTPS(secretName string) { } } j.Logger.Infof("Updating ingress %v to not use secret %v for TLS termination", j.Ingress.Name, secretName) - j.Update(func(ing *networkingv1.Ingress) { + j.Update(ctx, func(ing *networkingv1.Ingress) { ing.Spec.TLS = newTLS }) delete(j.RootCAs, secretName) } // PrepareTLSSecret creates a TLS secret and caches the cert. -func (j *TestJig) PrepareTLSSecret(namespace, secretName string, hosts ...string) error { - _, cert, _, err := createTLSSecret(j.Client, namespace, secretName, hosts...) +func (j *TestJig) PrepareTLSSecret(ctx context.Context, namespace, secretName string, hosts ...string) error { + _, cert, _, err := createTLSSecret(ctx, j.Client, namespace, secretName, hosts...) if err != nil { return err } @@ -661,20 +661,20 @@ func (j *TestJig) GetRootCA(secretName string) (rootCA []byte) { } // TryDeleteIngress attempts to delete the ingress resource and logs errors if they occur. -func (j *TestJig) TryDeleteIngress() { - j.tryDeleteGivenIngress(j.Ingress) +func (j *TestJig) TryDeleteIngress(ctx context.Context) { + j.tryDeleteGivenIngress(ctx, j.Ingress) } -func (j *TestJig) tryDeleteGivenIngress(ing *networkingv1.Ingress) { - if err := j.runDelete(ing); err != nil { +func (j *TestJig) tryDeleteGivenIngress(ctx context.Context, ing *networkingv1.Ingress) { + if err := j.runDelete(ctx, ing); err != nil { j.Logger.Infof("Error while deleting the ingress %v/%v with class %s: %v", ing.Namespace, ing.Name, j.Class, err) } } // runDelete runs the required command to delete the given ingress. -func (j *TestJig) runDelete(ing *networkingv1.Ingress) error { +func (j *TestJig) runDelete(ctx context.Context, ing *networkingv1.Ingress) error { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, metav1.DeleteOptions{}) + return j.Client.NetworkingV1().Ingresses(ing.Namespace).Delete(ctx, ing.Name, metav1.DeleteOptions{}) } // Use kubemci to delete a multicluster ingress. filePath := framework.TestContext.OutputDir + "/mci.yaml" @@ -710,11 +710,11 @@ func findIPv4(input string) string { } // getIngressAddress returns the ips/hostnames associated with the Ingress. 
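createTLSSecret above shows the other recurring change: helpers that talk to the API server directly now accept ctx and pass it to every client-go call in place of context.TODO(). A sketch of the same get-then-update-or-create shape, using a hypothetical ConfigMap helper rather than the patch's secret code:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// upsertConfigMap creates the ConfigMap, or updates its data if it already
// exists. Every API call uses the caller's ctx, so nothing outlives the test.
func upsertConfigMap(ctx context.Context, c clientset.Interface, ns string, cm *v1.ConfigMap) error {
	existing, err := c.CoreV1().ConfigMaps(ns).Get(ctx, cm.Name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = c.CoreV1().ConfigMaps(ns).Create(ctx, cm, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}
	existing.Data = cm.Data
	_, err = c.CoreV1().ConfigMaps(ns).Update(ctx, existing, metav1.UpdateOptions{})
	return err
}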
-func getIngressAddress(client clientset.Interface, ns, name, class string) ([]string, error) { +func getIngressAddress(ctx context.Context, client clientset.Interface, ns, name, class string) ([]string, error) { if class == MulticlusterIngressClassValue { return getIngressAddressFromKubemci(name) } - ing, err := client.NetworkingV1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{}) + ing, err := client.NetworkingV1().Ingresses(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -731,10 +731,10 @@ func getIngressAddress(client clientset.Interface, ns, name, class string) ([]st } // WaitForIngressAddress waits for the Ingress to acquire an address. -func (j *TestJig) WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) { +func (j *TestJig) WaitForIngressAddress(ctx context.Context, c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) { var address string - err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) { - ipOrNameList, err := getIngressAddress(c, ns, ingName, j.Class) + err := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) { + ipOrNameList, err := getIngressAddress(ctx, c, ns, ingName, j.Class) if err != nil || len(ipOrNameList) == 0 { j.Logger.Errorf("Waiting for Ingress %s/%s to acquire IP, error: %v, ipOrNameList: %v", ns, ingName, err, ipOrNameList) return false, err @@ -746,7 +746,7 @@ func (j *TestJig) WaitForIngressAddress(c clientset.Interface, ns, ingName strin return address, err } -func (j *TestJig) pollIngressWithCert(ing *networkingv1.Ingress, address string, knownHosts []string, cert []byte, waitForNodePort bool, timeout time.Duration) error { +func (j *TestJig) pollIngressWithCert(ctx context.Context, ing *networkingv1.Ingress, address string, knownHosts []string, cert []byte, waitForNodePort bool, timeout time.Duration) error { // Check that all rules respond to a simple GET. knownHostsSet := sets.NewString(knownHosts...) for _, rules := range ing.Spec.Rules { @@ -764,14 +764,14 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1.Ingress, address string, for _, p := range rules.IngressRuleValue.HTTP.Paths { if waitForNodePort { nodePort := int(p.Backend.Service.Port.Number) - if err := j.pollServiceNodePort(ing.Namespace, p.Backend.Service.Name, nodePort); err != nil { + if err := j.pollServiceNodePort(ctx, ing.Namespace, p.Backend.Service.Name, nodePort); err != nil { j.Logger.Infof("Error in waiting for nodeport %d on service %v/%v: %s", nodePort, ing.Namespace, p.Backend.Service.Name, err) return err } } route := fmt.Sprintf("%v://%v%v", proto, address, p.Path) j.Logger.Infof("Testing route %v host %v with simple GET", route, rules.Host) - if err := PollURL(route, rules.Host, timeout, j.PollInterval, timeoutClient, false); err != nil { + if err := PollURL(ctx, route, rules.Host, timeout, j.PollInterval, timeoutClient, false); err != nil { return err } } @@ -782,16 +782,16 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1.Ingress, address string, // WaitForIngress waits for the Ingress to get an address. 
// WaitForIngress returns when it gets the first 200 response -func (j *TestJig) WaitForIngress(waitForNodePort bool) { - if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, e2eservice.GetServiceLoadBalancerPropagationTimeout(j.Client)); err != nil { +func (j *TestJig) WaitForIngress(ctx context.Context, waitForNodePort bool) { + if err := j.WaitForGivenIngressWithTimeout(ctx, j.Ingress, waitForNodePort, e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, j.Client)); err != nil { framework.Failf("error in waiting for ingress to get an address: %s", err) } } // WaitForIngressToStable waits for the LB return 100 consecutive 200 responses. -func (j *TestJig) WaitForIngressToStable() { - if err := wait.Poll(10*time.Second, e2eservice.GetServiceLoadBalancerPropagationTimeout(j.Client), func() (bool, error) { - _, err := j.GetDistinctResponseFromIngress() +func (j *TestJig) WaitForIngressToStable(ctx context.Context) { + if err := wait.PollWithContext(ctx, 10*time.Second, e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, j.Client), func(ctx context.Context) (bool, error) { + _, err := j.GetDistinctResponseFromIngress(ctx) if err != nil { return false, nil } @@ -806,9 +806,9 @@ func (j *TestJig) WaitForIngressToStable() { // http or https). If waitForNodePort is true, the NodePort of the Service // is verified before verifying the Ingress. NodePort is currently a // requirement for cloudprovider Ingress. -func (j *TestJig) WaitForGivenIngressWithTimeout(ing *networkingv1.Ingress, waitForNodePort bool, timeout time.Duration) error { +func (j *TestJig) WaitForGivenIngressWithTimeout(ctx context.Context, ing *networkingv1.Ingress, waitForNodePort bool, timeout time.Duration) error { // Wait for the loadbalancer IP. - address, err := j.WaitForIngressAddress(j.Client, ing.Namespace, ing.Name, timeout) + address, err := j.WaitForIngressAddress(ctx, j.Client, ing.Namespace, ing.Name, timeout) if err != nil { return fmt.Errorf("Ingress failed to acquire an IP address within %v", timeout) } @@ -819,7 +819,7 @@ func (j *TestJig) WaitForGivenIngressWithTimeout(ing *networkingv1.Ingress, wait knownHosts = ing.Spec.TLS[0].Hosts cert = j.GetRootCA(ing.Spec.TLS[0].SecretName) } - return j.pollIngressWithCert(ing, address, knownHosts, cert, waitForNodePort, timeout) + return j.pollIngressWithCert(ctx, ing, address, knownHosts, cert, waitForNodePort, timeout) } // WaitForIngressWithCert waits till the ingress acquires an IP, then waits for its @@ -827,22 +827,22 @@ func (j *TestJig) WaitForGivenIngressWithTimeout(ing *networkingv1.Ingress, wait // waitForNodePort is true, the NodePort of the Service is verified before // verifying the Ingress. NodePort is currently a requirement for cloudprovider // Ingress. Hostnames and certificate need to be explicitly passed in. -func (j *TestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts []string, cert []byte) error { +func (j *TestJig) WaitForIngressWithCert(ctx context.Context, waitForNodePort bool, knownHosts []string, cert []byte) error { // Wait for the loadbalancer IP. 
- propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(j.Client) - address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, propagationTimeout) + propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, j.Client) + address, err := j.WaitForIngressAddress(ctx, j.Client, j.Ingress.Namespace, j.Ingress.Name, propagationTimeout) if err != nil { return fmt.Errorf("Ingress failed to acquire an IP address within %v", propagationTimeout) } - return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, propagationTimeout) + return j.pollIngressWithCert(ctx, j.Ingress, address, knownHosts, cert, waitForNodePort, propagationTimeout) } // VerifyURL polls for the given iterations, in intervals, and fails if the // given url returns a non-healthy http code even once. -func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error { +func (j *TestJig) VerifyURL(ctx context.Context, route, host string, iterations int, interval time.Duration, httpClient *http.Client) error { for i := 0; i < iterations; i++ { - b, err := SimpleGET(httpClient, route, host) + b, err := SimpleGET(ctx, httpClient, route, host) if err != nil { framework.Logf(b) return err @@ -853,18 +853,18 @@ func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Du return nil } -func (j *TestJig) pollServiceNodePort(ns, name string, port int) error { +func (j *TestJig) pollServiceNodePort(ctx context.Context, ns, name string, port int) error { // TODO: Curl all nodes? - u, err := getPortURL(j.Client, ns, name, port) + u, err := getPortURL(ctx, j.Client, ns, name, port) if err != nil { return err } - return PollURL(u, "", 30*time.Second, j.PollInterval, &http.Client{Timeout: IngressReqTimeout}, false) + return PollURL(ctx, u, "", 30*time.Second, j.PollInterval, &http.Client{Timeout: IngressReqTimeout}, false) } // getSvcNodePort returns the node port for the given service:port. -func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) { - svc, err := client.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) +func getSvcNodePort(ctx context.Context, client clientset.Interface, ns, name string, svcPort int) (int, error) { + svc, err := client.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return 0, err } @@ -880,8 +880,8 @@ func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (i } // getPortURL returns the url to a nodeport Service. -func getPortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) { - nodePort, err := getSvcNodePort(client, ns, name, svcPort) +func getPortURL(ctx context.Context, client clientset.Interface, ns, name string, svcPort int) (string, error) { + nodePort, err := getSvcNodePort(ctx, client, ns, name, svcPort) if err != nil { return "", err } @@ -889,8 +889,8 @@ func getPortURL(client clientset.Interface, ns, name string, svcPort int) (strin // unschedulable, since control plane nodes don't run kube-proxy. Without // kube-proxy NodePorts won't work. 
var nodes *v1.NodeList - if wait.PollImmediate(poll, framework.SingleCallTimeout, func() (bool, error) { - nodes, err = client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ + if wait.PollImmediateWithContext(ctx, poll, framework.SingleCallTimeout, func(ctx context.Context) (bool, error) { + nodes, err = client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -919,9 +919,9 @@ func getPortURL(client clientset.Interface, ns, name string, svcPort int) (strin // GetIngressNodePorts returns related backend services' nodePorts. // Current GCE ingress controller allows traffic to the default HTTP backend // by default, so retrieve its nodePort if includeDefaultBackend is true. -func (j *TestJig) GetIngressNodePorts(includeDefaultBackend bool) []string { +func (j *TestJig) GetIngressNodePorts(ctx context.Context, includeDefaultBackend bool) []string { nodePorts := []string{} - svcPorts := j.GetServicePorts(includeDefaultBackend) + svcPorts := j.GetServicePorts(ctx, includeDefaultBackend) for _, svcPort := range svcPorts { nodePorts = append(nodePorts, strconv.Itoa(int(svcPort.NodePort))) } @@ -931,10 +931,10 @@ func (j *TestJig) GetIngressNodePorts(includeDefaultBackend bool) []string { // GetServicePorts returns related backend services' svcPorts. // Current GCE ingress controller allows traffic to the default HTTP backend // by default, so retrieve its nodePort if includeDefaultBackend is true. -func (j *TestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.ServicePort { +func (j *TestJig) GetServicePorts(ctx context.Context, includeDefaultBackend bool) map[string]v1.ServicePort { svcPorts := make(map[string]v1.ServicePort) if includeDefaultBackend { - defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(context.TODO(), defaultBackendName, metav1.GetOptions{}) + defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(ctx, defaultBackendName, metav1.GetOptions{}) framework.ExpectNoError(err) svcPorts[defaultBackendName] = defaultSvc.Spec.Ports[0] } @@ -949,7 +949,7 @@ func (j *TestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.Serv } } for _, svcName := range backendSvcs { - svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(context.TODO(), svcName, metav1.GetOptions{}) + svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(ctx, svcName, metav1.GetOptions{}) framework.ExpectNoError(err) svcPorts[svcName] = svc.Spec.Ports[0] } @@ -957,8 +957,8 @@ func (j *TestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.Serv } // ConstructFirewallForIngress returns the expected GCE firewall rule for the ingress resource -func (j *TestJig) ConstructFirewallForIngress(firewallRuleName string, nodeTags []string) *compute.Firewall { - nodePorts := j.GetIngressNodePorts(true) +func (j *TestJig) ConstructFirewallForIngress(ctx context.Context, firewallRuleName string, nodeTags []string) *compute.Firewall { + nodePorts := j.GetIngressNodePorts(ctx, true) fw := compute.Firewall{} fw.Name = firewallRuleName @@ -974,10 +974,10 @@ func (j *TestJig) ConstructFirewallForIngress(firewallRuleName string, nodeTags } // GetDistinctResponseFromIngress tries GET call to the ingress VIP and return all distinct responses. 
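The getSvcNodePort, getPortURL and GetServicePorts hunks show how the context is threaded, always as the first parameter, through every helper layer until it reaches a client-go call; none of the layers creates its own context. A condensed sketch of such a chain with illustrative names:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// nodePortFor looks up the NodePort of the first port of a Service.
func nodePortFor(ctx context.Context, c clientset.Interface, ns, name string) (int32, error) {
	svc, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	if len(svc.Spec.Ports) == 0 {
		return 0, fmt.Errorf("service %s/%s has no ports", ns, name)
	}
	return svc.Spec.Ports[0].NodePort, nil
}

// nodePortURL builds a URL for that NodePort; it only forwards ctx, it never
// reaches for context.Background() or context.TODO().
func nodePortURL(ctx context.Context, c clientset.Interface, ns, name, host string) (string, error) {
	port, err := nodePortFor(ctx, c, ns, name)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("http://%s:%d", host, port), nil
}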
-func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) { +func (j *TestJig) GetDistinctResponseFromIngress(ctx context.Context) (sets.String, error) { // Wait for the loadbalancer IP. - propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(j.Client) - address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, propagationTimeout) + propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, j.Client) + address, err := j.WaitForIngressAddress(ctx, j.Client, j.Ingress.Namespace, j.Ingress.Name, propagationTimeout) if err != nil { framework.Failf("Ingress failed to acquire an IP address within %v", propagationTimeout) } @@ -986,7 +986,7 @@ func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) { for i := 0; i < 100; i++ { url := fmt.Sprintf("http://%v", address) - res, err := SimpleGET(timeoutClient, url, "") + res, err := SimpleGET(ctx, timeoutClient, url, "") if err != nil { j.Logger.Errorf("Failed to GET %q. Got responses: %q: %v", url, res, err) return responses, err @@ -1006,13 +1006,13 @@ type NginxIngressController struct { } // Init initializes the NginxIngressController -func (cont *NginxIngressController) Init() { +func (cont *NginxIngressController) Init(ctx context.Context) { // Set up a LoadBalancer service in front of nginx ingress controller and pass it via // --publish-service flag (see /nginx/rc.yaml) to make it work in private // clusters, i.e. clusters where nodes don't have public IPs. framework.Logf("Creating load balancer service for nginx ingress controller") serviceJig := e2eservice.NewTestJig(cont.Client, cont.Ns, "nginx-ingress-lb") - _, err := serviceJig.CreateTCPService(func(svc *v1.Service) { + _, err := serviceJig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.Selector = map[string]string{"k8s-app": "nginx-ingress-lb"} svc.Spec.Ports = []v1.ServicePort{ @@ -1021,7 +1021,7 @@ func (cont *NginxIngressController) Init() { {Name: "stats", Port: 18080}} }) framework.ExpectNoError(err) - cont.lbSvc, err = serviceJig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cont.Client)) + cont.lbSvc, err = serviceJig.WaitForLoadBalancer(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cont.Client)) framework.ExpectNoError(err) read := func(file string) string { @@ -1035,14 +1035,14 @@ func (cont *NginxIngressController) Init() { framework.Logf("initializing nginx ingress controller") e2ekubectl.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-") - rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{}) + rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get(ctx, "nginx-ingress-controller", metav1.GetOptions{}) framework.ExpectNoError(err) cont.rc = rc framework.Logf("waiting for pods with label %v", rc.Spec.Selector) sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector)) framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel)) - pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(context.TODO(), metav1.ListOptions{LabelSelector: sel.String()}) + pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(ctx, metav1.ListOptions{LabelSelector: sel.String()}) framework.ExpectNoError(err) if len(pods.Items) == 0 { framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel) @@ -1052,12 +1052,12 @@ func (cont *NginxIngressController) Init() { 
} // TearDown cleans up the NginxIngressController. -func (cont *NginxIngressController) TearDown() { +func (cont *NginxIngressController) TearDown(ctx context.Context) { if cont.lbSvc == nil { framework.Logf("No LoadBalancer service created, no cleanup necessary") return } - e2eservice.WaitForServiceDeletedWithFinalizer(cont.Client, cont.Ns, cont.lbSvc.Name) + e2eservice.WaitForServiceDeletedWithFinalizer(ctx, cont.Client, cont.Ns, cont.lbSvc.Name) } func generateBacksideHTTPSIngressSpec(ns string) *networkingv1.Ingress { @@ -1122,12 +1122,12 @@ func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment { } // SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured. -func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*appsv1.Deployment, *v1.Service, *networkingv1.Ingress, error) { - deployCreated, err := cs.AppsV1().Deployments(namespace).Create(context.TODO(), generateBacksideHTTPSDeploymentSpec(), metav1.CreateOptions{}) +func (j *TestJig) SetUpBacksideHTTPSIngress(ctx context.Context, cs clientset.Interface, namespace string, staticIPName string) (*appsv1.Deployment, *v1.Service, *networkingv1.Ingress, error) { + deployCreated, err := cs.AppsV1().Deployments(namespace).Create(ctx, generateBacksideHTTPSDeploymentSpec(), metav1.CreateOptions{}) if err != nil { return nil, nil, nil, err } - svcCreated, err := cs.CoreV1().Services(namespace).Create(context.TODO(), generateBacksideHTTPSServiceSpec(), metav1.CreateOptions{}) + svcCreated, err := cs.CoreV1().Services(namespace).Create(ctx, generateBacksideHTTPSServiceSpec(), metav1.CreateOptions{}) if err != nil { return nil, nil, nil, err } @@ -1138,7 +1138,7 @@ func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace st } ingToCreate.Annotations[IngressStaticIPKey] = staticIPName } - ingCreated, err := j.runCreate(ingToCreate) + ingCreated, err := j.runCreate(ctx, ingToCreate) if err != nil { return nil, nil, nil, err } @@ -1146,20 +1146,20 @@ func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace st } // DeleteTestResource deletes given deployment, service and ingress. 
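The TearDown hunk above, and DeleteTestResource just below, depend on the same property: because cleanup receives the spec's context, an aborted test makes the pending deletes return promptly with a context error instead of blocking on the API server. A minimal ctx-aware cleanup helper in that style, with hypothetical resource names:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// cleanupResources deletes a service and a deployment, collecting errors the
// way DeleteTestResource does. If ctx is canceled part way through, the
// remaining client-go calls fail fast with a context error.
func cleanupResources(ctx context.Context, c clientset.Interface, ns, svcName, deployName string) []error {
	var errs []error
	if err := c.CoreV1().Services(ns).Delete(ctx, svcName, metav1.DeleteOptions{}); err != nil {
		errs = append(errs, fmt.Errorf("deleting service %s/%s: %w", ns, svcName, err))
	}
	if err := c.AppsV1().Deployments(ns).Delete(ctx, deployName, metav1.DeleteOptions{}); err != nil {
		errs = append(errs, fmt.Errorf("deleting deployment %s/%s: %w", ns, deployName, err))
	}
	return errs
}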
-func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *appsv1.Deployment, svc *v1.Service, ing *networkingv1.Ingress) []error { +func (j *TestJig) DeleteTestResource(ctx context.Context, cs clientset.Interface, deploy *appsv1.Deployment, svc *v1.Service, ing *networkingv1.Ingress) []error { var errs []error if ing != nil { - if err := j.runDelete(ing); err != nil { + if err := j.runDelete(ctx, ing); err != nil { errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err)) } } if svc != nil { - if err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}); err != nil { + if err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) } } if deploy != nil { - if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(context.TODO(), deploy.Name, metav1.DeleteOptions{}); err != nil { + if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err)) } } diff --git a/test/e2e/framework/job/rest.go b/test/e2e/framework/job/rest.go index 0d3f38a700f..7497032600d 100644 --- a/test/e2e/framework/job/rest.go +++ b/test/e2e/framework/job/rest.go @@ -27,13 +27,13 @@ import ( ) // GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid. -func GetJob(c clientset.Interface, ns, name string) (*batchv1.Job, error) { - return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) +func GetJob(ctx context.Context, c clientset.Interface, ns, name string) (*batchv1.Job, error) { + return c.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{}) } // GetAllRunningJobPods returns a list of all running Pods belonging to a Job. -func GetAllRunningJobPods(c clientset.Interface, ns, jobName string) ([]v1.Pod, error) { - if podList, err := GetJobPods(c, ns, jobName); err != nil { +func GetAllRunningJobPods(ctx context.Context, c clientset.Interface, ns, jobName string) ([]v1.Pod, error) { + if podList, err := GetJobPods(ctx, c, ns, jobName); err != nil { return nil, err } else { pods := []v1.Pod{} @@ -47,20 +47,20 @@ func GetAllRunningJobPods(c clientset.Interface, ns, jobName string) ([]v1.Pod, } // GetJobPods returns a list of Pods belonging to a Job. -func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error) { +func GetJobPods(ctx context.Context, c clientset.Interface, ns, jobName string) (*v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName})) options := metav1.ListOptions{LabelSelector: label.String()} - return c.CoreV1().Pods(ns).List(context.TODO(), options) + return c.CoreV1().Pods(ns).List(ctx, options) } // CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has // been created. -func CreateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) { - return c.BatchV1().Jobs(ns).Create(context.TODO(), job, metav1.CreateOptions{}) +func CreateJob(ctx context.Context, c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) { + return c.BatchV1().Jobs(ns).Create(ctx, job, metav1.CreateOptions{}) } // CreateJob uses c to update a job in namespace ns. 
If the returned error is // nil, the returned Job is valid and has been updated. -func UpdateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) { - return c.BatchV1().Jobs(ns).Update(context.TODO(), job, metav1.UpdateOptions{}) +func UpdateJob(ctx context.Context, c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) { + return c.BatchV1().Jobs(ns).Update(ctx, job, metav1.UpdateOptions{}) } diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index 13e1f63f63e..3f94dc03182 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -31,19 +31,19 @@ import ( // WaitForJobPodsRunning wait for all pods for the Job named JobName in namespace ns to become Running. Only use // when pods will run for a long time, or it will be racy. -func WaitForJobPodsRunning(c clientset.Interface, ns, jobName string, expectedCount int32) error { - return waitForJobPodsInPhase(c, ns, jobName, expectedCount, v1.PodRunning) +func WaitForJobPodsRunning(ctx context.Context, c clientset.Interface, ns, jobName string, expectedCount int32) error { + return waitForJobPodsInPhase(ctx, c, ns, jobName, expectedCount, v1.PodRunning) } // WaitForJobPodsSucceeded wait for all pods for the Job named JobName in namespace ns to become Succeeded. -func WaitForJobPodsSucceeded(c clientset.Interface, ns, jobName string, expectedCount int32) error { - return waitForJobPodsInPhase(c, ns, jobName, expectedCount, v1.PodSucceeded) +func WaitForJobPodsSucceeded(ctx context.Context, c clientset.Interface, ns, jobName string, expectedCount int32) error { + return waitForJobPodsInPhase(ctx, c, ns, jobName, expectedCount, v1.PodSucceeded) } // waitForJobPodsInPhase wait for all pods for the Job named JobName in namespace ns to be in a given phase. -func waitForJobPodsInPhase(c clientset.Interface, ns, jobName string, expectedCount int32, phase v1.PodPhase) error { - return wait.Poll(framework.Poll, JobTimeout, func() (bool, error) { - pods, err := GetJobPods(c, ns, jobName) +func waitForJobPodsInPhase(ctx context.Context, c clientset.Interface, ns, jobName string, expectedCount int32, phase v1.PodPhase) error { + return wait.PollWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) { + pods, err := GetJobPods(ctx, c, ns, jobName) if err != nil { return false, err } @@ -58,9 +58,9 @@ func waitForJobPodsInPhase(c clientset.Interface, ns, jobName string, expectedCo } // WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns. -func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error { - return wait.Poll(framework.Poll, JobTimeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) +func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName string, completions int32) error { + return wait.PollWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) { + curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{}) if err != nil { return false, err } @@ -90,9 +90,9 @@ func isJobFailed(j *batchv1.Job) bool { } // WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete). 
-func WaitForJobFinish(c clientset.Interface, ns, jobName string) error { - return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) +func WaitForJobFinish(ctx context.Context, c clientset.Interface, ns, jobName string) error { + return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) { + curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{}) if err != nil { return false, err } @@ -112,9 +112,9 @@ func isJobFinished(j *batchv1.Job) bool { } // WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed. -func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error { - return wait.Poll(framework.Poll, timeout, func() (bool, error) { - _, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) +func WaitForJobGone(ctx context.Context, c clientset.Interface, ns, jobName string, timeout time.Duration) error { + return wait.PollWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + _, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil } @@ -124,9 +124,9 @@ func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Dura // WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns // to be deleted. -func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error { - return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - pods, err := GetJobPods(c, ns, jobName) +func WaitForAllJobPodsGone(ctx context.Context, c clientset.Interface, ns, jobName string) error { + return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) { + pods, err := GetJobPods(ctx, c, ns, jobName) if err != nil { return false, err } diff --git a/test/e2e/framework/kubectl/kubectl_utils.go b/test/e2e/framework/kubectl/kubectl_utils.go index 56e4bf75bcb..b3fe86b8bba 100644 --- a/test/e2e/framework/kubectl/kubectl_utils.go +++ b/test/e2e/framework/kubectl/kubectl_utils.go @@ -100,8 +100,8 @@ func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd { } // LogFailedContainers runs `kubectl logs` on a failed containers. 
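The job wait helpers above combine both conversions: wait.Poll becomes wait.PollWithContext and the Get inside the closure reuses the same ctx. A compact sketch of that composition; the helper name and condition are illustrative, not the framework's:

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForJobActive polls until the Job reports at least one active pod.
func waitForJobActive(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollWithContext(ctx, 5*time.Second, timeout, func(ctx context.Context) (bool, error) {
		job, err := c.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return job.Status.Active > 0, nil
	})
}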
-func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) +func LogFailedContainers(ctx context.Context, c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) { + podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { logFunc("Error getting pods in namespace '%s': %v", ns, err) return @@ -109,18 +109,18 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri logFunc("Running kubectl logs on non-ready containers in %v", ns) for _, pod := range podList.Items { if res, err := testutils.PodRunningReady(&pod); !res || err != nil { - kubectlLogPod(c, pod, "", framework.Logf) + kubectlLogPod(ctx, c, pod, "", framework.Logf) } } } -func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) { +func kubectlLogPod(ctx context.Context, c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) { for _, container := range pod.Spec.Containers { if strings.Contains(container.Name, containerNameSubstr) { // Contains() matches all strings if substr is empty - logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name) + logs, err := e2epod.GetPodLogs(ctx, c, pod.Namespace, pod.Name, container.Name) if err != nil { - logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name) + logs, err = e2epod.GetPreviousPodLogs(ctx, c, pod.Namespace, pod.Name, container.Name) if err != nil { logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err) } diff --git a/test/e2e/framework/kubelet/config.go b/test/e2e/framework/kubelet/config.go index b0eccf97ddc..edc9e2d48c5 100644 --- a/test/e2e/framework/kubelet/config.go +++ b/test/e2e/framework/kubelet/config.go @@ -17,6 +17,7 @@ limitations under the License. 
package kubelet import ( + "context" "crypto/tls" "encoding/json" "fmt" @@ -36,8 +37,8 @@ import ( ) // GetCurrentKubeletConfig fetches the current Kubelet Config for the given node -func GetCurrentKubeletConfig(nodeName, namespace string, useProxy bool) (*kubeletconfig.KubeletConfiguration, error) { - resp := pollConfigz(5*time.Minute, 5*time.Second, nodeName, namespace, useProxy) +func GetCurrentKubeletConfig(ctx context.Context, nodeName, namespace string, useProxy bool) (*kubeletconfig.KubeletConfiguration, error) { + resp := pollConfigz(ctx, 5*time.Minute, 5*time.Second, nodeName, namespace, useProxy) if len(resp) == 0 { return nil, fmt.Errorf("failed to fetch /configz from %q", nodeName) } @@ -49,7 +50,7 @@ func GetCurrentKubeletConfig(nodeName, namespace string, useProxy bool) (*kubele } // returns a status 200 response from the /configz endpoint or nil if fails -func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName, namespace string, useProxy bool) []byte { +func pollConfigz(ctx context.Context, timeout time.Duration, pollInterval time.Duration, nodeName, namespace string, useProxy bool) []byte { endpoint := "" if useProxy { // start local proxy, so we can send graceful deletion over query string, rather than body parameter @@ -89,7 +90,7 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName, na req.Header.Add("Accept", "application/json") var respBody []byte - err = wait.PollImmediate(pollInterval, timeout, func() (bool, error) { + err = wait.PollImmediateWithContext(ctx, pollInterval, timeout, func(ctx context.Context) (bool, error) { resp, err := client.Do(req) if err != nil { framework.Logf("Failed to get /configz, retrying. Error: %v", err) diff --git a/test/e2e/framework/kubelet/kubelet_pods.go b/test/e2e/framework/kubelet/kubelet_pods.go index 96a3fe31f37..723a4f96c94 100644 --- a/test/e2e/framework/kubelet/kubelet_pods.go +++ b/test/e2e/framework/kubelet/kubelet_pods.go @@ -17,26 +17,28 @@ limitations under the License. package kubelet import ( + "context" + v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" ) // GetKubeletPods retrieves the list of pods on the kubelet. -func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) { - return getKubeletPods(c, node, "pods") +func GetKubeletPods(ctx context.Context, c clientset.Interface, node string) (*v1.PodList, error) { + return getKubeletPods(ctx, c, node, "pods") } // GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods // includes necessary information (e.g., UID, name, namespace for // pods/containers), but do not contain the full spec. 
-func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) { - return getKubeletPods(c, node, "runningpods") +func GetKubeletRunningPods(ctx context.Context, c clientset.Interface, node string) (*v1.PodList, error) { + return getKubeletPods(ctx, c, node, "runningpods") } -func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) { +func getKubeletPods(ctx context.Context, c clientset.Interface, node, resource string) (*v1.PodList, error) { result := &v1.PodList{} - client, err := ProxyRequest(c, node, resource, framework.KubeletPort) + client, err := ProxyRequest(ctx, c, node, resource, framework.KubeletPort) if err != nil { return &v1.PodList{}, err } diff --git a/test/e2e/framework/kubelet/stats.go b/test/e2e/framework/kubelet/stats.go index 6484081e022..e910646fa44 100644 --- a/test/e2e/framework/kubelet/stats.go +++ b/test/e2e/framework/kubelet/stats.go @@ -98,7 +98,7 @@ type RuntimeOperationErrorRate struct { } // ProxyRequest performs a get on a node proxy endpoint given the nodename and rest client. -func ProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) { +func ProxyRequest(ctx context.Context, c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) { // proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165 var result restclient.Result finished := make(chan struct{}, 1) @@ -108,7 +108,7 @@ func ProxyRequest(c clientset.Interface, node, endpoint string, port int) (restc SubResource("proxy"). Name(fmt.Sprintf("%v:%v", node, port)). Suffix(endpoint). - Do(context.TODO()) + Do(ctx) finished <- struct{}{} }() @@ -121,12 +121,12 @@ func ProxyRequest(c clientset.Interface, node, endpoint string, port int) (restc } // NewRuntimeOperationMonitor returns a new RuntimeOperationMonitor. -func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor { +func NewRuntimeOperationMonitor(ctx context.Context, c clientset.Interface) *RuntimeOperationMonitor { m := &RuntimeOperationMonitor{ client: c, nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate), } - nodes, err := m.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := m.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err) } @@ -134,15 +134,15 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor m.nodesRuntimeOps[node.Name] = make(NodeRuntimeOperationErrorRate) } // Initialize the runtime operation error rate - m.GetRuntimeOperationErrorRate() + m.GetRuntimeOperationErrorRate(ctx) return m } // GetRuntimeOperationErrorRate gets runtime operation records from kubelet metrics and calculate // error rates of all runtime operations. 
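Where a helper needs its own upper bound, the patch derives it from the incoming context instead of context.Background() (the GetStatsSummary hunk below does this with framework.SingleCallTimeout), so whichever of "test aborted" and "per-call timeout" happens first wins. A minimal sketch of that idea with an assumed 30-second bound:

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listNodesBounded lists nodes but never takes longer than 30 seconds, while
// still honouring cancellation of the parent ctx.
func listNodesBounded(ctx context.Context, c clientset.Interface) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	_, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	return err
}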
-func (m *RuntimeOperationMonitor) GetRuntimeOperationErrorRate() map[string]NodeRuntimeOperationErrorRate { +func (m *RuntimeOperationMonitor) GetRuntimeOperationErrorRate(ctx context.Context) map[string]NodeRuntimeOperationErrorRate { for node := range m.nodesRuntimeOps { - nodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node) + nodeResult, err := getNodeRuntimeOperationErrorRate(ctx, m.client, node) if err != nil { framework.Logf("GetRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err) continue @@ -153,12 +153,12 @@ func (m *RuntimeOperationMonitor) GetRuntimeOperationErrorRate() map[string]Node } // GetLatestRuntimeOperationErrorRate gets latest error rate and timeout rate from last observed RuntimeOperationErrorRate. -func (m *RuntimeOperationMonitor) GetLatestRuntimeOperationErrorRate() map[string]NodeRuntimeOperationErrorRate { +func (m *RuntimeOperationMonitor) GetLatestRuntimeOperationErrorRate(ctx context.Context) map[string]NodeRuntimeOperationErrorRate { result := make(map[string]NodeRuntimeOperationErrorRate) for node := range m.nodesRuntimeOps { result[node] = make(NodeRuntimeOperationErrorRate) oldNodeResult := m.nodesRuntimeOps[node] - curNodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node) + curNodeResult, err := getNodeRuntimeOperationErrorRate(ctx, m.client, node) if err != nil { framework.Logf("GetLatestRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err) continue @@ -193,9 +193,9 @@ func FormatRuntimeOperationErrorRate(nodesResult map[string]NodeRuntimeOperation } // getNodeRuntimeOperationErrorRate gets runtime operation error rate from specified node. -func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeRuntimeOperationErrorRate, error) { +func getNodeRuntimeOperationErrorRate(ctx context.Context, c clientset.Interface, node string) (NodeRuntimeOperationErrorRate, error) { result := make(NodeRuntimeOperationErrorRate) - ms, err := e2emetrics.GetKubeletMetrics(c, node) + ms, err := e2emetrics.GetKubeletMetrics(ctx, c, node) if err != nil { return result, err } @@ -225,8 +225,8 @@ func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeR } // GetStatsSummary contacts kubelet for the container information. -func GetStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alpha1.Summary, error) { - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) +func GetStatsSummary(ctx context.Context, c clientset.Interface, nodeName string) (*kubeletstatsv1alpha1.Summary, error) { + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() data, err := c.CoreV1().RESTClient().Get(). @@ -248,14 +248,14 @@ func GetStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alp return &summary, nil } -func getNodeStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alpha1.Summary, error) { +func getNodeStatsSummary(ctx context.Context, c clientset.Interface, nodeName string) (*kubeletstatsv1alpha1.Summary, error) { data, err := c.CoreV1().RESTClient().Get(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", nodeName, framework.KubeletPort)). Suffix("stats/summary"). SetHeader("Content-Type", "application/json"). 
- Do(context.TODO()).Raw() + Do(ctx).Raw() if err != nil { return nil, err @@ -318,8 +318,8 @@ func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerCo } // GetKubeletHeapStats returns stats of kubelet heap. -func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) { - client, err := ProxyRequest(c, nodeName, "debug/pprof/heap", framework.KubeletPort) +func GetKubeletHeapStats(ctx context.Context, c clientset.Interface, nodeName string) (string, error) { + client, err := ProxyRequest(ctx, c, nodeName, "debug/pprof/heap", framework.KubeletPort) if err != nil { return "", err } @@ -356,7 +356,7 @@ type resourceCollector struct { client clientset.Interface buffers map[string][]*ContainerResourceUsage pollingInterval time.Duration - stopCh chan struct{} + stop func() } func newResourceCollector(c clientset.Interface, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector { @@ -371,22 +371,23 @@ func newResourceCollector(c clientset.Interface, nodeName string, containerNames } // Start starts a goroutine to Poll the node every pollingInterval. -func (r *resourceCollector) Start() { - r.stopCh = make(chan struct{}, 1) +func (r *resourceCollector) Start(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + r.stop = cancel // Keep the last observed stats for comparison. oldStats := make(map[string]*kubeletstatsv1alpha1.ContainerStats) - go wait.Until(func() { r.collectStats(oldStats) }, r.pollingInterval, r.stopCh) + go wait.UntilWithContext(ctx, func(ctx context.Context) { r.collectStats(ctx, oldStats) }, r.pollingInterval) } // Stop sends a signal to terminate the stats collecting goroutine. func (r *resourceCollector) Stop() { - close(r.stopCh) + r.stop() } // collectStats gets the latest stats from kubelet stats summary API, computes // the resource usage, and pushes it to the buffer. -func (r *resourceCollector) collectStats(oldStatsMap map[string]*kubeletstatsv1alpha1.ContainerStats) { - summary, err := getNodeStatsSummary(r.client, r.node) +func (r *resourceCollector) collectStats(ctx context.Context, oldStatsMap map[string]*kubeletstatsv1alpha1.ContainerStats) { + summary, err := getNodeStatsSummary(ctx, r.client, r.node) if err != nil { framework.Logf("Error getting node stats summary on %q, err: %v", r.node, err) return @@ -486,9 +487,9 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI } // Start starts collectors. -func (r *ResourceMonitor) Start() { +func (r *ResourceMonitor) Start(ctx context.Context) { // It should be OK to monitor unschedulable Nodes - nodes, err := r.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := r.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("ResourceMonitor: unable to get list of nodes: %v", err) } @@ -496,7 +497,7 @@ func (r *ResourceMonitor) Start() { for _, node := range nodes.Items { collector := newResourceCollector(r.client, node.Name, r.containers, r.pollingInterval) r.collectors[node.Name] = collector - collector.Start() + collector.Start(ctx) } } diff --git a/test/e2e/framework/kubesystem/kubesystem.go b/test/e2e/framework/kubesystem/kubesystem.go index 5b8f1460dc9..c2e40e153e4 100644 --- a/test/e2e/framework/kubesystem/kubesystem.go +++ b/test/e2e/framework/kubesystem/kubesystem.go @@ -17,6 +17,7 @@ limitations under the License. 
package kubesystem import ( + "context" "fmt" "net" "strconv" @@ -27,7 +28,7 @@ import ( ) // RestartControllerManager restarts the kube-controller-manager. -func RestartControllerManager() error { +func RestartControllerManager(ctx context.Context) error { // TODO: Make it work for all providers and distros. if !framework.ProviderIs("gce", "aws") { return fmt.Errorf("unsupported provider for RestartControllerManager: %s", framework.TestContext.Provider) @@ -37,7 +38,7 @@ func RestartControllerManager() error { } cmd := "pidof kube-controller-manager | xargs sudo kill" framework.Logf("Restarting controller-manager via ssh, running: %v", cmd) - result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) return fmt.Errorf("couldn't restart controller-manager: %v", err) @@ -46,10 +47,10 @@ func RestartControllerManager() error { } // WaitForControllerManagerUp waits for the kube-controller-manager to be up. -func WaitForControllerManagerUp() error { +func WaitForControllerManagerUp(ctx context.Context) error { cmd := "curl -k https://localhost:" + strconv.Itoa(framework.KubeControllerManagerPort) + "/healthz" - for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider) + for start := time.Now(); time.Since(start) < time.Minute && ctx.Err() == nil; time.Sleep(5 * time.Second) { + result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) } diff --git a/test/e2e/framework/manifest/manifest.go b/test/e2e/framework/manifest/manifest.go index 556de474cb2..1917fb70da6 100644 --- a/test/e2e/framework/manifest/manifest.go +++ b/test/e2e/framework/manifest/manifest.go @@ -17,6 +17,7 @@ limitations under the License. package manifest import ( + "context" "fmt" "io" "net/http" @@ -94,14 +95,19 @@ func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) { } // DaemonSetFromURL reads from a url and returns the daemonset in it. 
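The resourceCollector hunk above (in kubelet/stats.go) replaces a stop channel with a cancel function: Start derives a child context, wait.UntilWithContext drives the collection loop, and Stop simply cancels. A stripped-down sketch of that pattern with an illustrative type:

package example

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

type collector struct {
	interval time.Duration
	stop     func() // replaces the old stopCh chan struct{}
}

// Start launches the periodic loop; it ends when either the parent ctx is
// canceled (test aborted) or Stop is called.
func (c *collector) Start(ctx context.Context) {
	ctx, cancel := context.WithCancel(ctx)
	c.stop = cancel
	go wait.UntilWithContext(ctx, func(ctx context.Context) {
		fmt.Println("collecting...") // placeholder for the real stats collection
	}, c.interval)
}

// Stop cancels the derived context and thereby terminates the loop.
func (c *collector) Stop() {
	c.stop()
}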
-func DaemonSetFromURL(url string) (*appsv1.DaemonSet, error) { +func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error) { framework.Logf("Parsing ds from %v", url) var response *http.Response var err error for i := 1; i <= 5; i++ { - response, err = http.Get(url) + request, reqErr := http.NewRequestWithContext(ctx, "GET", url, nil) + if reqErr != nil { + err = reqErr + continue + } + response, err = http.DefaultClient.Do(request) if err == nil && response.StatusCode == 200 { break } diff --git a/test/e2e/framework/metrics/api_server_metrics.go b/test/e2e/framework/metrics/api_server_metrics.go index 3b7e376ce6f..c22c5c45a00 100644 --- a/test/e2e/framework/metrics/api_server_metrics.go +++ b/test/e2e/framework/metrics/api_server_metrics.go @@ -43,8 +43,8 @@ func parseAPIServerMetrics(data string) (APIServerMetrics, error) { return result, nil } -func (g *Grabber) getMetricsFromAPIServer() (string, error) { - rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do(context.TODO()).Raw() +func (g *Grabber) getMetricsFromAPIServer(ctx context.Context) (string, error) { + rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do(ctx).Raw() if err != nil { return "", err } diff --git a/test/e2e/framework/metrics/grab.go b/test/e2e/framework/metrics/grab.go index 2264f9f3799..23e9ad2b4b9 100644 --- a/test/e2e/framework/metrics/grab.go +++ b/test/e2e/framework/metrics/grab.go @@ -17,24 +17,26 @@ limitations under the License. package metrics import ( + "context" + "github.com/onsi/ginkgo/v2" "k8s.io/kubernetes/test/e2e/framework" ) -func GrabBeforeEach(f *framework.Framework) (result *Collection) { +func GrabBeforeEach(ctx context.Context, f *framework.Framework) (result *Collection) { gatherMetricsAfterTest := framework.TestContext.GatherMetricsAfterTest == "true" || framework.TestContext.GatherMetricsAfterTest == "master" if !gatherMetricsAfterTest || !framework.TestContext.IncludeClusterAutoscalerMetrics { return nil } ginkgo.By("Gathering metrics before test", func() { - grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false) + grabber, err := NewMetricsGrabber(ctx, f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false) if err != nil { framework.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err) return } - metrics, err := grabber.Grab() + metrics, err := grabber.Grab(ctx) if err != nil { framework.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err) return @@ -46,7 +48,7 @@ func GrabBeforeEach(f *framework.Framework) (result *Collection) { return } -func GrabAfterEach(f *framework.Framework, before *Collection) { +func GrabAfterEach(ctx context.Context, f *framework.Framework, before *Collection) { if framework.TestContext.GatherMetricsAfterTest == "false" { return } @@ -54,12 +56,12 @@ func GrabAfterEach(f *framework.Framework, before *Collection) { ginkgo.By("Gathering metrics after test", func() { // Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics. 
grabMetricsFromKubelets := framework.TestContext.GatherMetricsAfterTest != "master" && !framework.ProviderIs("kubemark") - grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false) + grabber, err := NewMetricsGrabber(ctx, f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false) if err != nil { framework.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err) return } - received, err := grabber.Grab() + received, err := grabber.Grab(ctx) if err != nil { framework.Logf("MetricsGrabber failed to grab some of the metrics: %v", err) return diff --git a/test/e2e/framework/metrics/init/init.go b/test/e2e/framework/metrics/init/init.go index c89ee0db2da..4a7c8ec5527 100644 --- a/test/e2e/framework/metrics/init/init.go +++ b/test/e2e/framework/metrics/init/init.go @@ -19,6 +19,8 @@ limitations under the License. package init import ( + "context" + "github.com/onsi/ginkgo/v2" "k8s.io/kubernetes/test/e2e/framework" @@ -28,8 +30,8 @@ import ( func init() { framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions, func(f *framework.Framework) { - ginkgo.BeforeEach(func() { - metrics := e2emetrics.GrabBeforeEach(f) + ginkgo.BeforeEach(func(ctx context.Context) { + metrics := e2emetrics.GrabBeforeEach(ctx, f) ginkgo.DeferCleanup(e2emetrics.GrabAfterEach, f, metrics) }) }, diff --git a/test/e2e/framework/metrics/kubelet_metrics.go b/test/e2e/framework/metrics/kubelet_metrics.go index 4fb114f685e..5118f6aa050 100644 --- a/test/e2e/framework/metrics/kubelet_metrics.go +++ b/test/e2e/framework/metrics/kubelet_metrics.go @@ -68,8 +68,12 @@ func NewKubeletMetrics() KubeletMetrics { } // GrabKubeletMetricsWithoutProxy retrieve metrics from the kubelet on the given node using a simple GET over http. -func GrabKubeletMetricsWithoutProxy(nodeName, path string) (KubeletMetrics, error) { - resp, err := http.Get(fmt.Sprintf("http://%s%s", nodeName, path)) +func GrabKubeletMetricsWithoutProxy(ctx context.Context, nodeName, path string) (KubeletMetrics, error) { + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://%s%s", nodeName, path), nil) + if err != nil { + return KubeletMetrics{}, err + } + resp, err := http.DefaultClient.Do(req) if err != nil { return KubeletMetrics{}, err } @@ -89,7 +93,7 @@ func parseKubeletMetrics(data string) (KubeletMetrics, error) { return result, nil } -func (g *Grabber) getMetricsFromNode(nodeName string, kubeletPort int) (string, error) { +func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int) (string, error) { // There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock. finished := make(chan struct{}, 1) var err error @@ -100,7 +104,7 @@ func (g *Grabber) getMetricsFromNode(nodeName string, kubeletPort int) (string, SubResource("proxy"). Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)). Suffix("metrics"). - Do(context.TODO()).Raw() + Do(ctx).Raw() finished <- struct{}{} }() select { @@ -136,21 +140,21 @@ func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j]. // If a apiserver client is passed in, the function will try to get kubelet metrics from metrics grabber; // or else, the function will try to get kubelet metrics directly from the node. 
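DaemonSetFromURL and GrabKubeletMetricsWithoutProxy show the plain-HTTP version of the same rule: http.Get carries no context, so the request is rebuilt with http.NewRequestWithContext and sent through a client. A minimal sketch with hypothetical URL handling:

package example

import (
	"context"
	"fmt"
	"io"
	"net/http"
)

// fetch performs a GET that is aborted as soon as ctx is canceled.
func fetch(ctx context.Context, url string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status %d from %s", resp.StatusCode, url)
	}
	body, err := io.ReadAll(resp.Body)
	return string(body), err
}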
-func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (KubeletMetrics, error) { +func getKubeletMetricsFromNode(ctx context.Context, c clientset.Interface, nodeName string) (KubeletMetrics, error) { if c == nil { - return GrabKubeletMetricsWithoutProxy(nodeName, "/metrics") + return GrabKubeletMetricsWithoutProxy(ctx, nodeName, "/metrics") } - grabber, err := NewMetricsGrabber(c, nil, nil, true, false, false, false, false, false) + grabber, err := NewMetricsGrabber(ctx, c, nil, nil, true, false, false, false, false, false) if err != nil { return KubeletMetrics{}, err } - return grabber.GrabFromKubelet(nodeName) + return grabber.GrabFromKubelet(ctx, nodeName) } // GetKubeletMetrics gets all metrics in kubelet subsystem from specified node and trims // the subsystem prefix. -func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics, error) { - ms, err := getKubeletMetricsFromNode(c, nodeName) +func GetKubeletMetrics(ctx context.Context, c clientset.Interface, nodeName string) (KubeletMetrics, error) { + ms, err := getKubeletMetricsFromNode(ctx, c, nodeName) if err != nil { return KubeletMetrics{}, err } @@ -216,8 +220,8 @@ func GetKubeletLatencyMetrics(ms KubeletMetrics, filterMetricNames sets.String) } // HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics. -func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) { - ms, err := GetKubeletMetrics(c, nodeName) +func HighLatencyKubeletOperations(ctx context.Context, c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) { + ms, err := GetKubeletMetrics(ctx, c, nodeName) if err != nil { return KubeletLatencyMetrics{}, err } diff --git a/test/e2e/framework/metrics/metrics_grabber.go b/test/e2e/framework/metrics/metrics_grabber.go index 63c90c3ed53..f7c34d8ec77 100644 --- a/test/e2e/framework/metrics/metrics_grabber.go +++ b/test/e2e/framework/metrics/metrics_grabber.go @@ -88,7 +88,7 @@ type Grabber struct { // Collecting metrics data is an optional debug feature. Not all clusters will // support it. If disabled for a component, the corresponding Grab function // will immediately return an error derived from MetricsGrabbingDisabledError. 
-func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool, snapshotController bool) (*Grabber, error) { +func NewMetricsGrabber(ctx context.Context, c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool, snapshotController bool) (*Grabber, error) { kubeScheduler := "" kubeControllerManager := "" @@ -102,7 +102,7 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *re return nil, errors.New("a rest config is required for grabbing kube-controller and kube-controller-manager metrics") } - podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } @@ -132,18 +132,18 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *re externalClient: ec, config: config, grabFromAPIServer: apiServer, - grabFromControllerManager: checkPodDebugHandlers(c, controllers, "kube-controller-manager", kubeControllerManager), + grabFromControllerManager: checkPodDebugHandlers(ctx, c, controllers, "kube-controller-manager", kubeControllerManager), grabFromKubelets: kubelets, - grabFromScheduler: checkPodDebugHandlers(c, scheduler, "kube-scheduler", kubeScheduler), + grabFromScheduler: checkPodDebugHandlers(ctx, c, scheduler, "kube-scheduler", kubeScheduler), grabFromClusterAutoscaler: clusterAutoscaler, - grabFromSnapshotController: checkPodDebugHandlers(c, snapshotController, "snapshot-controller", snapshotControllerManager), + grabFromSnapshotController: checkPodDebugHandlers(ctx, c, snapshotController, "snapshot-controller", snapshotControllerManager), kubeScheduler: kubeScheduler, kubeControllerManager: kubeControllerManager, snapshotController: snapshotControllerManager, }, nil } -func checkPodDebugHandlers(c clientset.Interface, requested bool, component, podName string) bool { +func checkPodDebugHandlers(ctx context.Context, c clientset.Interface, requested bool, component, podName string) bool { if !requested { return false } @@ -155,7 +155,7 @@ func checkPodDebugHandlers(c clientset.Interface, requested bool, component, pod // The debug handlers on the host where the pod runs might be disabled. // We can check that indirectly by trying to retrieve log output. limit := int64(1) - if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(context.TODO()); err != nil { + if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(ctx); err != nil { klog.Warningf("Can't retrieve log output of %s (%q). Debug handlers might be disabled in kubelet. 
Grabbing metrics from %s is disabled.", podName, err, component) return false @@ -171,8 +171,8 @@ func (g *Grabber) HasControlPlanePods() bool { } // GrabFromKubelet returns metrics from kubelet -func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) { - nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()}) +func (g *Grabber) GrabFromKubelet(ctx context.Context, nodeName string) (KubeletMetrics, error) { + nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()}) if err != nil { return KubeletMetrics{}, err } @@ -180,14 +180,14 @@ func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) { return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items) } kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port - return g.grabFromKubeletInternal(nodeName, int(kubeletPort)) + return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort)) } -func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (KubeletMetrics, error) { +func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int) (KubeletMetrics, error) { if kubeletPort <= 0 || kubeletPort > 65535 { return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering", kubeletPort) } - output, err := g.getMetricsFromNode(nodeName, int(kubeletPort)) + output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort)) if err != nil { return KubeletMetrics{}, err } @@ -195,7 +195,7 @@ func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (Kub } // GrabFromScheduler returns metrics from scheduler -func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) { +func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, error) { if !g.grabFromScheduler { return SchedulerMetrics{}, fmt.Errorf("kube-scheduler: %w", MetricsGrabbingDisabledError) } @@ -203,7 +203,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) { var err error g.waitForSchedulerReadyOnce.Do(func() { - if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil { + if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil { err = fmt.Errorf("error waiting for kube-scheduler pod to be ready: %w", readyErr) } }) @@ -213,8 +213,8 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) { var lastMetricsFetchErr error var output string - if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort) + if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) { + output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort) return lastMetricsFetchErr == nil, nil }); metricsWaitErr != nil { err := fmt.Errorf("error waiting for kube-scheduler pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr) @@ -225,7 +225,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) { } // GrabFromClusterAutoscaler 
returns metrics from cluster autoscaler -func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) { +func (g *Grabber) GrabFromClusterAutoscaler(ctx context.Context) (ClusterAutoscalerMetrics, error) { if !g.HasControlPlanePods() && g.externalClient == nil { return ClusterAutoscalerMetrics{}, fmt.Errorf("ClusterAutoscaler: %w", MetricsGrabbingDisabledError) } @@ -238,7 +238,7 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) client = g.client namespace = metav1.NamespaceSystem } - output, err := g.getMetricsFromPod(client, "cluster-autoscaler", namespace, 8085) + output, err := g.getMetricsFromPod(ctx, client, "cluster-autoscaler", namespace, 8085) if err != nil { return ClusterAutoscalerMetrics{}, err } @@ -246,7 +246,7 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) } // GrabFromControllerManager returns metrics from controller manager -func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) { +func (g *Grabber) GrabFromControllerManager(ctx context.Context) (ControllerManagerMetrics, error) { if !g.grabFromControllerManager { return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager: %w", MetricsGrabbingDisabledError) } @@ -254,7 +254,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) var err error g.waitForControllerManagerReadyOnce.Do(func() { - if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil { + if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil { err = fmt.Errorf("error waiting for kube-controller-manager pod to be ready: %w", readyErr) } }) @@ -264,8 +264,8 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) var output string var lastMetricsFetchErr error - if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort) + if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) { + output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort) return lastMetricsFetchErr == nil, nil }); metricsWaitErr != nil { err := fmt.Errorf("error waiting for kube-controller-manager to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr) @@ -276,7 +276,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) } // GrabFromSnapshotController returns metrics from controller manager -func (g *Grabber) GrabFromSnapshotController(podName string, port int) (SnapshotControllerMetrics, error) { +func (g *Grabber) GrabFromSnapshotController(ctx context.Context, podName string, port int) (SnapshotControllerMetrics, error) { if !g.grabFromSnapshotController { return SnapshotControllerMetrics{}, fmt.Errorf("volume-snapshot-controller: %w", MetricsGrabbingDisabledError) } @@ -293,7 +293,7 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot var err error g.waitForSnapshotControllerReadyOnce.Do(func() { - if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, podName, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil { + if readyErr := 
e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, podName, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil { err = fmt.Errorf("error waiting for volume-snapshot-controller pod to be ready: %w", readyErr) } }) @@ -303,8 +303,8 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot var output string var lastMetricsFetchErr error - if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - output, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, port) + if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) { + output, lastMetricsFetchErr = g.getMetricsFromPod(ctx, g.client, podName, metav1.NamespaceSystem, port) return lastMetricsFetchErr == nil, nil }); metricsWaitErr != nil { err = fmt.Errorf("error waiting for volume-snapshot-controller pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr) @@ -315,8 +315,8 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot } // GrabFromAPIServer returns metrics from API server -func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) { - output, err := g.getMetricsFromAPIServer() +func (g *Grabber) GrabFromAPIServer(ctx context.Context) (APIServerMetrics, error) { + output, err := g.getMetricsFromAPIServer(ctx) if err != nil { return APIServerMetrics{}, err } @@ -324,11 +324,11 @@ func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) { } // Grab returns metrics from corresponding component -func (g *Grabber) Grab() (Collection, error) { +func (g *Grabber) Grab(ctx context.Context) (Collection, error) { result := Collection{} var errs []error if g.grabFromAPIServer { - metrics, err := g.GrabFromAPIServer() + metrics, err := g.GrabFromAPIServer(ctx) if err != nil { errs = append(errs, err) } else { @@ -336,7 +336,7 @@ func (g *Grabber) Grab() (Collection, error) { } } if g.grabFromScheduler { - metrics, err := g.GrabFromScheduler() + metrics, err := g.GrabFromScheduler(ctx) if err != nil { errs = append(errs, err) } else { @@ -344,7 +344,7 @@ func (g *Grabber) Grab() (Collection, error) { } } if g.grabFromControllerManager { - metrics, err := g.GrabFromControllerManager() + metrics, err := g.GrabFromControllerManager(ctx) if err != nil { errs = append(errs, err) } else { @@ -352,7 +352,7 @@ func (g *Grabber) Grab() (Collection, error) { } } if g.grabFromSnapshotController { - metrics, err := g.GrabFromSnapshotController(g.snapshotController, snapshotControllerPort) + metrics, err := g.GrabFromSnapshotController(ctx, g.snapshotController, snapshotControllerPort) if err != nil { errs = append(errs, err) } else { @@ -360,7 +360,7 @@ func (g *Grabber) Grab() (Collection, error) { } } if g.grabFromClusterAutoscaler { - metrics, err := g.GrabFromClusterAutoscaler() + metrics, err := g.GrabFromClusterAutoscaler(ctx) if err != nil { errs = append(errs, err) } else { @@ -369,13 +369,13 @@ func (g *Grabber) Grab() (Collection, error) { } if g.grabFromKubelets { result.KubeletMetrics = make(map[string]KubeletMetrics) - nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { errs = append(errs, err) } else { for _, node := range nodes.Items { kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port - metrics, err := g.grabFromKubeletInternal(node.Name, int(kubeletPort)) + metrics, err := 
g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort)) if err != nil { errs = append(errs, err) } @@ -390,14 +390,14 @@ func (g *Grabber) Grab() (Collection, error) { } // getMetricsFromPod retrieves metrics data from an insecure port. -func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string, namespace string, port int) (string, error) { +func (g *Grabber) getMetricsFromPod(ctx context.Context, client clientset.Interface, podName string, namespace string, port int) (string, error) { rawOutput, err := client.CoreV1().RESTClient().Get(). Namespace(namespace). Resource("pods"). SubResource("proxy"). Name(fmt.Sprintf("%s:%d", podName, port)). Suffix("metrics"). - Do(context.TODO()).Raw() + Do(ctx).Raw() if err != nil { return "", err } @@ -409,7 +409,7 @@ func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string, // similar to "kubectl port-forward" + "kubectl get --raw // https://localhost:/metrics". It uses the same credentials // as kubelet. -func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port int) (string, error) { +func (g *Grabber) getSecureMetricsFromPod(ctx context.Context, podName string, namespace string, port int) (string, error) { dialer := e2epod.NewDialer(g.client, g.config) metricConfig := rest.CopyConfig(g.config) addr := e2epod.Addr{ @@ -444,7 +444,7 @@ func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port rawOutput, err := metricClient.RESTClient().Get(). AbsPath("metrics"). - Do(context.TODO()).Raw() + Do(ctx).Raw() if err != nil { return "", err } diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index 588037c5c33..c0e904c03a6 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -118,7 +118,7 @@ func EndpointsUseHostNetwork(config *NetworkingTestConfig) { } // NewNetworkingTestConfig creates and sets up a new test config helper. -func NewNetworkingTestConfig(f *framework.Framework, setters ...Option) *NetworkingTestConfig { +func NewNetworkingTestConfig(ctx context.Context, f *framework.Framework, setters ...Option) *NetworkingTestConfig { // default options config := &NetworkingTestConfig{ f: f, @@ -128,12 +128,12 @@ func NewNetworkingTestConfig(f *framework.Framework, setters ...Option) *Network setter(config) } ginkgo.By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace)) - config.setup(getServiceSelector()) + config.setup(ctx, getServiceSelector()) return config } // NewCoreNetworkingTestConfig creates and sets up a new test config helper for Node E2E. -func NewCoreNetworkingTestConfig(f *framework.Framework, hostNetwork bool) *NetworkingTestConfig { +func NewCoreNetworkingTestConfig(ctx context.Context, f *framework.Framework, hostNetwork bool) *NetworkingTestConfig { // default options config := &NetworkingTestConfig{ f: f, @@ -141,7 +141,7 @@ func NewCoreNetworkingTestConfig(f *framework.Framework, hostNetwork bool) *Netw HostNetwork: hostNetwork, } ginkgo.By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace)) - config.setupCore(getServiceSelector()) + config.setupCore(ctx, getServiceSelector()) return config } @@ -214,17 +214,17 @@ type NetexecDialResponse struct { } // DialFromEndpointContainer executes a curl via kubectl exec in an endpoint container. Returns an error to be handled by the caller. 
-func (config *NetworkingTestConfig) DialFromEndpointContainer(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) error { - return config.DialFromContainer(protocol, echoHostname, config.EndpointPods[0].Status.PodIP, targetIP, EndpointHTTPPort, targetPort, maxTries, minTries, expectedEps) +func (config *NetworkingTestConfig) DialFromEndpointContainer(ctx context.Context, protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) error { + return config.DialFromContainer(ctx, protocol, echoHostname, config.EndpointPods[0].Status.PodIP, targetIP, EndpointHTTPPort, targetPort, maxTries, minTries, expectedEps) } // DialFromTestContainer executes a curl via kubectl exec in a test container. Returns an error to be handled by the caller. -func (config *NetworkingTestConfig) DialFromTestContainer(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) error { - return config.DialFromContainer(protocol, echoHostname, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort, maxTries, minTries, expectedEps) +func (config *NetworkingTestConfig) DialFromTestContainer(ctx context.Context, protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) error { + return config.DialFromContainer(ctx, protocol, echoHostname, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort, maxTries, minTries, expectedEps) } // DialEchoFromTestContainer executes a curl via kubectl exec in a test container. The response is expected to match the echoMessage, Returns an error to be handled by the caller. -func (config *NetworkingTestConfig) DialEchoFromTestContainer(protocol, targetIP string, targetPort, maxTries, minTries int, echoMessage string) error { +func (config *NetworkingTestConfig) DialEchoFromTestContainer(ctx context.Context, protocol, targetIP string, targetPort, maxTries, minTries int, echoMessage string) error { expectedResponse := sets.NewString() expectedResponse.Insert(echoMessage) var dialCommand string @@ -238,7 +238,7 @@ func (config *NetworkingTestConfig) DialEchoFromTestContainer(protocol, targetIP } else { dialCommand = fmt.Sprintf("echo%%20%s", echoMessage) } - return config.DialFromContainer(protocol, dialCommand, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort, maxTries, minTries, expectedResponse) + return config.DialFromContainer(ctx, protocol, dialCommand, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort, maxTries, minTries, expectedResponse) } // diagnoseMissingEndpoints prints debug information about the endpoints that @@ -301,14 +301,14 @@ func makeCURLDialCommand(ipPort, dialCmd, protocol, targetIP string, targetPort // more for maxTries. Use this if you want to eg: fail a readiness check on a // pod and confirm it doesn't show up as an endpoint. // Returns nil if no error, or error message if failed after trying maxTries. 
-func (config *NetworkingTestConfig) DialFromContainer(protocol, dialCommand, containerIP, targetIP string, containerHTTPPort, targetPort, maxTries, minTries int, expectedResponses sets.String) error { +func (config *NetworkingTestConfig) DialFromContainer(ctx context.Context, protocol, dialCommand, containerIP, targetIP string, containerHTTPPort, targetPort, maxTries, minTries int, expectedResponses sets.String) error { ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHTTPPort)) cmd := makeCURLDialCommand(ipPort, dialCommand, protocol, targetIP, targetPort) responses := sets.NewString() for i := 0; i < maxTries; i++ { - resp, err := config.GetResponseFromContainer(protocol, dialCommand, containerIP, targetIP, containerHTTPPort, targetPort) + resp, err := config.GetResponseFromContainer(ctx, protocol, dialCommand, containerIP, targetIP, containerHTTPPort, targetPort) if err != nil { // A failure to kubectl exec counts as a try, not a hard fail. // Also note that we will keep failing for maxTries in tests where @@ -342,8 +342,8 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, dialCommand, con } // GetEndpointsFromTestContainer executes a curl via kubectl exec in a test container. -func (config *NetworkingTestConfig) GetEndpointsFromTestContainer(protocol, targetIP string, targetPort, tries int) (sets.String, error) { - return config.GetEndpointsFromContainer(protocol, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort, tries) +func (config *NetworkingTestConfig) GetEndpointsFromTestContainer(ctx context.Context, protocol, targetIP string, targetPort, tries int) (sets.String, error) { + return config.GetEndpointsFromContainer(ctx, protocol, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort, tries) } // GetEndpointsFromContainer executes a curl via kubectl exec in a test container, @@ -351,14 +351,14 @@ func (config *NetworkingTestConfig) GetEndpointsFromTestContainer(protocol, targ // in the url. It returns all different endpoints from multiple retries. // - tries is the number of curl attempts. If this many attempts pass and // we don't see any endpoints, the test fails. -func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containerIP, targetIP string, containerHTTPPort, targetPort, tries int) (sets.String, error) { +func (config *NetworkingTestConfig) GetEndpointsFromContainer(ctx context.Context, protocol, containerIP, targetIP string, containerHTTPPort, targetPort, tries int) (sets.String, error) { ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHTTPPort)) cmd := makeCURLDialCommand(ipPort, "hostName", protocol, targetIP, targetPort) eps := sets.NewString() for i := 0; i < tries; i++ { - stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd) + stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, config.f, config.TestContainerPod.Name, cmd) if err != nil { // A failure to kubectl exec counts as a try, not a hard fail. // Also note that we will keep failing for maxTries in tests where @@ -389,11 +389,11 @@ func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containe } // GetResponseFromContainer executes a curl via kubectl exec in a container. 
-func (config *NetworkingTestConfig) GetResponseFromContainer(protocol, dialCommand, containerIP, targetIP string, containerHTTPPort, targetPort int) (NetexecDialResponse, error) { +func (config *NetworkingTestConfig) GetResponseFromContainer(ctx context.Context, protocol, dialCommand, containerIP, targetIP string, containerHTTPPort, targetPort int) (NetexecDialResponse, error) { ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHTTPPort)) cmd := makeCURLDialCommand(ipPort, dialCommand, protocol, targetIP, targetPort) - stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd) + stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, config.f, config.TestContainerPod.Name, cmd) if err != nil { return NetexecDialResponse{}, fmt.Errorf("failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr) } @@ -407,17 +407,17 @@ func (config *NetworkingTestConfig) GetResponseFromContainer(protocol, dialComma } // GetResponseFromTestContainer executes a curl via kubectl exec in a test container. -func (config *NetworkingTestConfig) GetResponseFromTestContainer(protocol, dialCommand, targetIP string, targetPort int) (NetexecDialResponse, error) { - return config.GetResponseFromContainer(protocol, dialCommand, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort) +func (config *NetworkingTestConfig) GetResponseFromTestContainer(ctx context.Context, protocol, dialCommand, targetIP string, targetPort int) (NetexecDialResponse, error) { + return config.GetResponseFromContainer(ctx, protocol, dialCommand, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort) } // GetHTTPCodeFromTestContainer executes a curl via kubectl exec in a test container and returns the status code. -func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP string, targetPort int) (int, error) { +func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(ctx context.Context, path, targetIP string, targetPort int) (int, error) { cmd := fmt.Sprintf("curl -g -q -s -o /dev/null -w %%{http_code} http://%s:%d%s", targetIP, targetPort, path) - stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd) + stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, config.f, config.TestContainerPod.Name, cmd) // We only care about the status code reported by curl, // and want to return any other errors, such as cannot execute command in the Pod. // If curl failed to connect to host, it would exit with code 7, which makes `ExecShellInPodWithFullOutput` @@ -445,7 +445,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP // - maxTries == minTries will return as soon as all endpoints succeed (or fail once maxTries is reached without // success on all endpoints). // In general its prudent to have a high enough level of minTries to guarantee that all pods get a fair chance at receiving traffic. 
-func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) error { +func (config *NetworkingTestConfig) DialFromNode(ctx context.Context, protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) error { var cmd string if protocol == "udp" { cmd = fmt.Sprintf("echo hostName | nc -w 1 -u %s %d", targetIP, targetPort) @@ -465,7 +465,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ filterCmd := fmt.Sprintf("%s | grep -v '^\\s*$'", cmd) framework.Logf("Going to poll %v on port %v at least %v times, with a maximum of %v tries before failing", targetIP, targetPort, minTries, maxTries) for i := 0; i < maxTries; i++ { - stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.HostTestContainerPod.Name, filterCmd) + stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, config.f, config.HostTestContainerPod.Name, filterCmd) if err != nil || len(stderr) > 0 { // A failure to exec command counts as a try, not a hard fail. // Also note that we will keep failing for maxTries in tests where @@ -497,30 +497,30 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ // GetSelfURL executes a curl against the given path via kubectl exec into a // test container running with host networking, and fails if the output // doesn't match the expected string. -func (config *NetworkingTestConfig) GetSelfURL(port int32, path string, expected string) { +func (config *NetworkingTestConfig) GetSelfURL(ctx context.Context, port int32, path string, expected string) { cmd := fmt.Sprintf("curl -i -q -s --connect-timeout 1 http://localhost:%d%s", port, path) ginkgo.By(fmt.Sprintf("Getting kube-proxy self URL %s", path)) - config.executeCurlCmd(cmd, expected) + config.executeCurlCmd(ctx, cmd, expected) } // GetSelfURLStatusCode executes a curl against the given path via kubectl exec into a // test container running with host networking, and fails if the returned status // code doesn't match the expected string. -func (config *NetworkingTestConfig) GetSelfURLStatusCode(port int32, path string, expected string) { +func (config *NetworkingTestConfig) GetSelfURLStatusCode(ctx context.Context, port int32, path string, expected string) { // check status code cmd := fmt.Sprintf("curl -o /dev/null -i -q -s -w %%{http_code} --connect-timeout 1 http://localhost:%d%s", port, path) ginkgo.By(fmt.Sprintf("Checking status code against http://localhost:%d%s", port, path)) - config.executeCurlCmd(cmd, expected) + config.executeCurlCmd(ctx, cmd, expected) } -func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string) { +func (config *NetworkingTestConfig) executeCurlCmd(ctx context.Context, cmd string, expected string) { // These are arbitrary timeouts. The curl command should pass on first try, // unless remote server is starved/bootstrapping/restarting etc. 
const retryInterval = 1 * time.Second const retryTimeout = 30 * time.Second podName := config.HostTestContainerPod.Name var msg string - if pollErr := wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) { + if pollErr := wait.PollImmediateWithContext(ctx, retryInterval, retryTimeout, func(ctx context.Context) (bool, error) { stdout, err := e2epodoutput.RunHostCmd(config.Namespace, podName, cmd) if err != nil { msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err) @@ -700,41 +700,41 @@ func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, se return res } -func (config *NetworkingTestConfig) createNodePortService(selector map[string]string) { - config.NodePortService = config.CreateService(config.createNodePortServiceSpec(nodePortServiceName, selector, false)) +func (config *NetworkingTestConfig) createNodePortService(ctx context.Context, selector map[string]string) { + config.NodePortService = config.CreateService(ctx, config.createNodePortServiceSpec(nodePortServiceName, selector, false)) } -func (config *NetworkingTestConfig) createSessionAffinityService(selector map[string]string) { - config.SessionAffinityService = config.CreateService(config.createNodePortServiceSpec(sessionAffinityServiceName, selector, true)) +func (config *NetworkingTestConfig) createSessionAffinityService(ctx context.Context, selector map[string]string) { + config.SessionAffinityService = config.CreateService(ctx, config.createNodePortServiceSpec(sessionAffinityServiceName, selector, true)) } // DeleteNodePortService deletes NodePort service. -func (config *NetworkingTestConfig) DeleteNodePortService() { - err := config.getServiceClient().Delete(context.TODO(), config.NodePortService.Name, metav1.DeleteOptions{}) +func (config *NetworkingTestConfig) DeleteNodePortService(ctx context.Context) { + err := config.getServiceClient().Delete(ctx, config.NodePortService.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "error while deleting NodePortService. err:%v)", err) time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted. 
} -func (config *NetworkingTestConfig) createTestPods() { +func (config *NetworkingTestConfig) createTestPods(ctx context.Context) { testContainerPod := config.createTestPodSpec() hostTestContainerPod := e2epod.NewExecPodSpec(config.Namespace, hostTestPodName, config.HostNetwork) - config.createPod(testContainerPod) + config.createPod(ctx, testContainerPod) if config.HostNetwork { - config.createPod(hostTestContainerPod) + config.createPod(ctx, hostTestContainerPod) } - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(config.f.ClientSet, testContainerPod.Name, config.f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, config.f.ClientSet, testContainerPod.Name, config.f.Namespace.Name)) var err error - config.TestContainerPod, err = config.getPodClient().Get(context.TODO(), testContainerPod.Name, metav1.GetOptions{}) + config.TestContainerPod, err = config.getPodClient().Get(ctx, testContainerPod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err) } if config.HostNetwork { - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(config.f.ClientSet, hostTestContainerPod.Name, config.f.Namespace.Name)) - config.HostTestContainerPod, err = config.getPodClient().Get(context.TODO(), hostTestContainerPod.Name, metav1.GetOptions{}) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, config.f.ClientSet, hostTestContainerPod.Name, config.f.Namespace.Name)) + config.HostTestContainerPod, err = config.getPodClient().Get(ctx, hostTestContainerPod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err) } @@ -742,14 +742,14 @@ func (config *NetworkingTestConfig) createTestPods() { } // CreateService creates the provided service in config.Namespace and returns created service -func (config *NetworkingTestConfig) CreateService(serviceSpec *v1.Service) *v1.Service { - _, err := config.getServiceClient().Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) +func (config *NetworkingTestConfig) CreateService(ctx context.Context, serviceSpec *v1.Service) *v1.Service { + _, err := config.getServiceClient().Create(ctx, serviceSpec, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) - err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second) + err = WaitForService(ctx, config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second) framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err)) - createdService, err := config.getServiceClient().Get(context.TODO(), serviceSpec.Name, metav1.GetOptions{}) + createdService, err := config.getServiceClient().Get(ctx, serviceSpec.Name, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) return createdService @@ -757,13 +757,13 @@ func (config *NetworkingTestConfig) CreateService(serviceSpec *v1.Service) *v1.S // setupCore sets up the pods and core test config // mainly for simplified node e2e setup -func (config *NetworkingTestConfig) setupCore(selector map[string]string) { +func (config *NetworkingTestConfig) setupCore(ctx context.Context, selector map[string]string) { ginkgo.By("Creating the service pods in kubernetes") podName := "netserver" - config.EndpointPods = 
config.createNetProxyPods(podName, selector) + config.EndpointPods = config.createNetProxyPods(ctx, podName, selector) ginkgo.By("Creating test pods") - config.createTestPods() + config.createTestPods(ctx) epCount := len(config.EndpointPods) @@ -774,20 +774,20 @@ func (config *NetworkingTestConfig) setupCore(selector map[string]string) { } // setup includes setupCore and also sets up services -func (config *NetworkingTestConfig) setup(selector map[string]string) { - config.setupCore(selector) +func (config *NetworkingTestConfig) setup(ctx context.Context, selector map[string]string) { + config.setupCore(ctx, selector) ginkgo.By("Getting node addresses") - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute)) - nodeList, err := e2enode.GetReadySchedulableNodes(config.f.ClientSet) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, config.f.ClientSet, 10*time.Minute)) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, config.f.ClientSet) framework.ExpectNoError(err) e2eskipper.SkipUnlessNodeCountIsAtLeast(2) config.Nodes = nodeList.Items ginkgo.By("Creating the service on top of the pods in kubernetes") - config.createNodePortService(selector) - config.createSessionAffinityService(selector) + config.createNodePortService(ctx, selector) + config.createSessionAffinityService(ctx, selector) for _, p := range config.NodePortService.Spec.Ports { switch p.Protocol { @@ -830,16 +830,16 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) { } ginkgo.By("Waiting for NodePort service to expose endpoint") - err = framework.WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) + err = framework.WaitForServiceEndpointsNum(ctx, config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", nodePortServiceName, config.Namespace) ginkgo.By("Waiting for Session Affinity service to expose endpoint") - err = framework.WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, sessionAffinityServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) + err = framework.WaitForServiceEndpointsNum(ctx, config.f.ClientSet, config.Namespace, sessionAffinityServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", sessionAffinityServiceName, config.Namespace) } -func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod { - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute)) - nodeList, err := e2enode.GetBoundedReadySchedulableNodes(config.f.ClientSet, maxNetProxyPodsCount) +func (config *NetworkingTestConfig) createNetProxyPods(ctx context.Context, podName string, selector map[string]string) []*v1.Pod { + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, config.f.ClientSet, 10*time.Minute)) + nodeList, err := e2enode.GetBoundedReadySchedulableNodes(ctx, config.f.ClientSet, maxNetProxyPodsCount) framework.ExpectNoError(err) nodes := nodeList.Items @@ -856,15 +856,15 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector if pod.Spec.HostNetwork && framework.NodeOSDistroIs("windows") { e2epod.WithWindowsHostProcess(pod, "") } - 
createdPod := config.createPod(pod) + createdPod := config.createPod(ctx, pod) createdPods = append(createdPods, createdPod) } // wait that all of them are up runningPods := make([]*v1.Pod, 0, len(nodes)) for _, p := range createdPods { - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(config.f.ClientSet, p.Name, config.f.Namespace.Name, framework.PodStartTimeout)) - rp, err := config.getPodClient().Get(context.TODO(), p.Name, metav1.GetOptions{}) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, config.f.ClientSet, p.Name, config.f.Namespace.Name, framework.PodStartTimeout)) + rp, err := config.getPodClient().Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) runningPods = append(runningPods, rp) } @@ -873,17 +873,17 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector } // DeleteNetProxyPod deletes the first endpoint pod and waits for it being removed. -func (config *NetworkingTestConfig) DeleteNetProxyPod() { +func (config *NetworkingTestConfig) DeleteNetProxyPod(ctx context.Context) { pod := config.EndpointPods[0] - config.getPodClient().Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + framework.ExpectNoError(config.getPodClient().Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))) config.EndpointPods = config.EndpointPods[1:] // wait for pod being deleted. - err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) + err := e2epod.WaitForPodToDisappear(ctx, config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) if err != nil { framework.Failf("Failed to delete %s pod: %v", pod.Name, err) } // wait for endpoint being removed. - err = framework.WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) + err = framework.WaitForServiceEndpointsNum(ctx, config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) if err != nil { framework.Failf("Failed to remove endpoint from service: %s", nodePortServiceName) } @@ -891,8 +891,8 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() { time.Sleep(5 * time.Second) } -func (config *NetworkingTestConfig) createPod(pod *v1.Pod) *v1.Pod { - return config.getPodClient().Create(pod) +func (config *NetworkingTestConfig) createPod(ctx context.Context, pod *v1.Pod) *v1.Pod { + return config.getPodClient().Create(ctx, pod) } func (config *NetworkingTestConfig) getPodClient() *e2epod.PodClient { @@ -1069,12 +1069,12 @@ func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Re // At the end (even in case of errors), the network traffic is brought back to normal. // This function executes commands on a node so it will work only for some // environments. 
-func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) { +func TestUnderTemporaryNetworkFailure(ctx context.Context, c clientset.Interface, ns string, node *v1.Node, testFunc func(ctx context.Context)) { host, err := e2enode.GetSSHExternalIP(node) if err != nil { framework.Failf("Error getting node external ip : %v", err) } - controlPlaneAddresses := framework.GetControlPlaneAddresses(c) + controlPlaneAddresses := framework.GetControlPlaneAddresses(ctx, c) ginkgo.By(fmt.Sprintf("block network traffic from node %s to the control plane", node.Name)) defer func() { // This code will execute even if setting the iptables rule failed. @@ -1083,24 +1083,24 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1 // separately, but I prefer to stay on the safe side). ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the control plane", node.Name)) for _, instanceAddress := range controlPlaneAddresses { - UnblockNetwork(host, instanceAddress) + UnblockNetwork(ctx, host, instanceAddress) } }() framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name) - if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) { + if !e2enode.WaitConditionToBe(ctx, c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) { framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } for _, instanceAddress := range controlPlaneAddresses { - BlockNetwork(host, instanceAddress) + BlockNetwork(ctx, host, instanceAddress) } framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name) - if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) { + if !e2enode.WaitConditionToBe(ctx, c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) { framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout) } - testFunc() + testFunc(ctx) // network traffic is unblocked in a deferred function } @@ -1122,18 +1122,18 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1 // BlockNetwork(from, to) // ... // } -func BlockNetwork(from string, to string) { +func BlockNetwork(ctx context.Context, from string, to string) { framework.Logf("block network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule) - if result, err := e2essh.SSH(dropCmd, from, framework.TestContext.Provider); result.Code != 0 || err != nil { + if result, err := e2essh.SSH(ctx, dropCmd, from, framework.TestContext.Provider); result.Code != 0 || err != nil { e2essh.LogResult(result) framework.Failf("Unexpected error: %v", err) } } // UnblockNetwork unblocks network between the given from value and the given to value. -func UnblockNetwork(from string, to string) { +func UnblockNetwork(ctx context.Context, from string, to string) { framework.Logf("Unblock network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule) @@ -1143,8 +1143,8 @@ func UnblockNetwork(from string, to string) { // not coming back. Subsequent tests will run or fewer nodes (some of the tests // may fail). 
Manual intervention is required in such case (recreating the // cluster solves the problem too). - err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) { - result, err := e2essh.SSH(undropCmd, from, framework.TestContext.Provider) + err := wait.PollWithContext(ctx, time.Millisecond*100, time.Second*30, func(ctx context.Context) (bool, error) { + result, err := e2essh.SSH(ctx, undropCmd, from, framework.TestContext.Provider) if result.Code == 0 && err == nil { return true, nil } @@ -1161,9 +1161,9 @@ func UnblockNetwork(from string, to string) { } // WaitForService waits until the service appears (exist == true), or disappears (exist == false) -func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func WaitForService(ctx context.Context, c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + _, err := c.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) switch { case err == nil: framework.Logf("Service %s in namespace %s found.", name, namespace) diff --git a/test/e2e/framework/node/helper.go b/test/e2e/framework/node/helper.go index 945577ad3ad..b7c5ed99acb 100644 --- a/test/e2e/framework/node/helper.go +++ b/test/e2e/framework/node/helper.go @@ -39,16 +39,17 @@ const ( // WaitForAllNodesSchedulable waits up to timeout for all // (but TestContext.AllowedNotReadyNodes) to become schedulable. -func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error { +func WaitForAllNodesSchedulable(ctx context.Context, c clientset.Interface, timeout time.Duration) error { if framework.TestContext.AllowedNotReadyNodes == -1 { return nil } framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes) - return wait.PollImmediate( + return wait.PollImmediateWithContext( + ctx, 30*time.Second, timeout, - CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold), + CheckReadyForTests(ctx, c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold), ) } @@ -58,9 +59,9 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la } // ExpectNodeHasLabel expects that the given node has the given label pair. -func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) { +func ExpectNodeHasLabel(ctx context.Context, c clientset.Interface, nodeName string, labelKey string, labelValue string) { ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue) - node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(node.Labels[labelKey], labelValue) } @@ -76,17 +77,17 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) } // ExpectNodeHasTaint expects that the node has the given taint. 
-func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) { +func ExpectNodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) { ginkgo.By("verifying the node has the taint " + taint.ToString()) - if has, err := NodeHasTaint(c, nodeName, taint); !has { + if has, err := NodeHasTaint(ctx, c, nodeName, taint); !has { framework.ExpectNoError(err) framework.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName) } } // NodeHasTaint returns true if the node has the given taint, else returns false. -func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) { - node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func NodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) { + node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return false, err } @@ -104,14 +105,14 @@ func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool // TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy, // and figure out how to do it in a configurable way, as we can't expect all setups to run // default test add-ons. -func AllNodesReady(c clientset.Interface, timeout time.Duration) error { - if err := allNodesReady(c, timeout); err != nil { +func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error { + if err := allNodesReady(ctx, c, timeout); err != nil { return fmt.Errorf("checking for ready nodes: %v", err) } return nil } -func allNodesReady(c clientset.Interface, timeout time.Duration) error { +func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error { if framework.TestContext.AllowedNotReadyNodes == -1 { return nil } @@ -119,10 +120,10 @@ func allNodesReady(c clientset.Interface, timeout time.Duration) error { framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes) var notReady []*v1.Node - err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return false, err } diff --git a/test/e2e/framework/node/init/init.go b/test/e2e/framework/node/init/init.go index f5fc28aef4e..32398559444 100644 --- a/test/e2e/framework/node/init/init.go +++ b/test/e2e/framework/node/init/init.go @@ -18,6 +18,7 @@ limitations under the License. package init import ( + "context" "time" "github.com/onsi/ginkgo/v2" @@ -29,14 +30,14 @@ import ( func init() { framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions, func(f *framework.Framework) { - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if f.ClientSet == nil { // Test didn't reach f.BeforeEach, most // likely because the test got // skipped. Nothing to check... 
return } - e2enode.AllNodesReady(f.ClientSet, 3*time.Minute) + e2enode.AllNodesReady(ctx, f.ClientSet, 3*time.Minute) }) }, ) diff --git a/test/e2e/framework/node/node_killer.go b/test/e2e/framework/node/node_killer.go index 1634aeb4eb9..fafb6b6a648 100644 --- a/test/e2e/framework/node/node_killer.go +++ b/test/e2e/framework/node/node_killer.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "sync" "time" @@ -39,31 +40,31 @@ type NodeKiller struct { // NewNodeKiller creates new NodeKiller. func NewNodeKiller(config framework.NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller { - config.NodeKillerStopCh = make(chan struct{}) + config.NodeKillerStopCtx, config.NodeKillerStop = context.WithCancel(context.Background()) return &NodeKiller{config, client, provider} } // Run starts NodeKiller until stopCh is closed. -func (k *NodeKiller) Run(stopCh <-chan struct{}) { +func (k *NodeKiller) Run(ctx context.Context) { // wait.JitterUntil starts work immediately, so wait first. time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor)) - wait.JitterUntil(func() { - nodes := k.pickNodes() - k.kill(nodes) - }, k.config.Interval, k.config.JitterFactor, true, stopCh) + wait.JitterUntilWithContext(ctx, func(ctx context.Context) { + nodes := k.pickNodes(ctx) + k.kill(ctx, nodes) + }, k.config.Interval, k.config.JitterFactor, true) } -func (k *NodeKiller) pickNodes() []v1.Node { - nodes, err := GetReadySchedulableNodes(k.client) +func (k *NodeKiller) pickNodes(ctx context.Context) []v1.Node { + nodes, err := GetReadySchedulableNodes(ctx, k.client) framework.ExpectNoError(err) numNodes := int(k.config.FailureRatio * float64(len(nodes.Items))) - nodes, err = GetBoundedReadySchedulableNodes(k.client, numNodes) + nodes, err = GetBoundedReadySchedulableNodes(ctx, k.client, numNodes) framework.ExpectNoError(err) return nodes.Items } -func (k *NodeKiller) kill(nodes []v1.Node) { +func (k *NodeKiller) kill(ctx context.Context, nodes []v1.Node) { wg := sync.WaitGroup{} wg.Add(len(nodes)) for _, node := range nodes { @@ -73,7 +74,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) { defer wg.Done() framework.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name) - err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node) + err := e2essh.IssueSSHCommand(ctx, "sudo systemctl stop docker kubelet", k.provider, &node) if err != nil { framework.Logf("ERROR while stopping node %q: %v", node.Name, err) return @@ -82,7 +83,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) { time.Sleep(k.config.SimulatedDowntime) framework.Logf("Rebooting %q to repair the node", node.Name) - err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node) + err = e2essh.IssueSSHCommand(ctx, "sudo reboot", k.provider, &node) if err != nil { framework.Logf("ERROR while rebooting node %q: %v", node.Name, err) return diff --git a/test/e2e/framework/node/resource.go b/test/e2e/framework/node/resource.go index 468d5d56d5a..1cfdf8acd83 100644 --- a/test/e2e/framework/node/resource.go +++ b/test/e2e/framework/node/resource.go @@ -193,8 +193,8 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) { } // TotalRegistered returns number of schedulable Nodes. 
-func TotalRegistered(c clientset.Interface) (int, error) { - nodes, err := waitListSchedulableNodes(c) +func TotalRegistered(ctx context.Context, c clientset.Interface) (int, error) { + nodes, err := waitListSchedulableNodes(ctx, c) if err != nil { framework.Logf("Failed to list nodes: %v", err) return 0, err @@ -203,8 +203,8 @@ func TotalRegistered(c clientset.Interface) (int, error) { } // TotalReady returns number of ready schedulable Nodes. -func TotalReady(c clientset.Interface) (int, error) { - nodes, err := waitListSchedulableNodes(c) +func TotalReady(ctx context.Context, c clientset.Interface) (int, error) { + nodes, err := waitListSchedulableNodes(ctx, c) if err != nil { framework.Logf("Failed to list nodes: %v", err) return 0, err @@ -293,8 +293,8 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri } // PickIP picks one public node IP -func PickIP(c clientset.Interface) (string, error) { - publicIps, err := GetPublicIps(c) +func PickIP(ctx context.Context, c clientset.Interface) (string, error) { + publicIps, err := GetPublicIps(ctx, c) if err != nil { return "", fmt.Errorf("get node public IPs error: %s", err) } @@ -306,8 +306,8 @@ func PickIP(c clientset.Interface) (string, error) { } // GetPublicIps returns a public IP list of nodes. -func GetPublicIps(c clientset.Interface) ([]string, error) { - nodes, err := GetReadySchedulableNodes(c) +func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) { + nodes, err := GetReadySchedulableNodes(ctx, c) if err != nil { return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err) } @@ -324,8 +324,8 @@ func GetPublicIps(c clientset.Interface) ([]string, error) { // 2) Needs to be ready. // If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely. // If there are no nodes that are both ready and schedulable, this will return an error. -func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) { - nodes, err = checkWaitListSchedulableNodes(c) +func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) { + nodes, err = checkWaitListSchedulableNodes(ctx, c) if err != nil { return nil, fmt.Errorf("listing schedulable nodes error: %s", err) } @@ -341,8 +341,8 @@ func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err er // GetBoundedReadySchedulableNodes is like GetReadySchedulableNodes except that it returns // at most maxNodes nodes. Use this to keep your test case from blowing up when run on a // large cluster. -func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) { - nodes, err = GetReadySchedulableNodes(c) +func GetBoundedReadySchedulableNodes(ctx context.Context, c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) { + nodes, err = GetReadySchedulableNodes(ctx, c) if err != nil { return nil, err } @@ -361,8 +361,8 @@ func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes // GetRandomReadySchedulableNode gets a single randomly-selected node which is available for // running pods on. If there are no available nodes it will return an error. 
-func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) { - nodes, err := GetReadySchedulableNodes(c) +func GetRandomReadySchedulableNode(ctx context.Context, c clientset.Interface) (*v1.Node, error) { + nodes, err := GetReadySchedulableNodes(ctx, c) if err != nil { return nil, err } @@ -373,8 +373,8 @@ func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) { // There are cases when we care about tainted nodes // E.g. in tests related to nodes with gpu we care about nodes despite // presence of nvidia.com/gpu=present:NoSchedule taint -func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, err error) { - nodes, err = checkWaitListSchedulableNodes(c) +func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) { + nodes, err = checkWaitListSchedulableNodes(ctx, c) if err != nil { return nil, fmt.Errorf("listing schedulable nodes error: %s", err) } @@ -514,10 +514,10 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool { } // PodNodePairs return podNode pairs for all pods in a namespace -func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) { +func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodNode, error) { var result []PodNode - podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { return result, err } @@ -533,8 +533,8 @@ func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) { } // GetClusterZones returns the values of zone label collected from all nodes. -func GetClusterZones(c clientset.Interface) (sets.String, error) { - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) { + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err) } @@ -554,9 +554,9 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) { } // GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable. -func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) { +func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) { // GetReadySchedulableNodes already filters our tainted and unschedulable nodes. - nodes, err := GetReadySchedulableNodes(c) + nodes, err := GetReadySchedulableNodes(ctx, c) if err != nil { return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err) } @@ -576,8 +576,8 @@ func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) { } // CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking. 
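For illustration (a sketch, not part of this diff): how the converted node helpers above are meant to be consumed from a Ginkgo v2 spec. The spec body accepts a context.Context that Ginkgo cancels when the spec is interrupted, and that context is passed straight through to the helpers; the test itself is hypothetical.

package sketch

import (
    "context"

    "github.com/onsi/ginkgo/v2"

    "k8s.io/kubernetes/test/e2e/framework"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

var _ = ginkgo.Describe("node listing (sketch)", func() {
    f := framework.NewDefaultFramework("node-listing")

    ginkgo.It("uses the spec context for every API call", func(ctx context.Context) {
        node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
        framework.ExpectNoError(err)

        zones, err := e2enode.GetSchedulableClusterZones(ctx, f.ClientSet)
        framework.ExpectNoError(err)
        framework.Logf("picked node %q; schedulable zones: %v", node.Name, zones.List())
    })
})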
-func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string { - nodes, err := GetBoundedReadySchedulableNodes(c, maxCount) +func CreatePodsPerNodeForSimpleApp(ctx context.Context, c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string { + nodes, err := GetBoundedReadySchedulableNodes(ctx, c, maxCount) // TODO use wrapper methods in expect.go after removing core e2e dependency on node gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred()) podLabels := map[string]string{ @@ -585,7 +585,7 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str } for i, node := range nodes.Items { framework.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName) - _, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{ + _, err := c.CoreV1().Pods(namespace).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(appName+"-pod-%v", i), Labels: podLabels, @@ -600,33 +600,33 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str // RemoveTaintsOffNode removes a list of taints from the given node // It is simply a helper wrapper for RemoveTaintOffNode -func RemoveTaintsOffNode(c clientset.Interface, nodeName string, taints []v1.Taint) { +func RemoveTaintsOffNode(ctx context.Context, c clientset.Interface, nodeName string, taints []v1.Taint) { for _, taint := range taints { - RemoveTaintOffNode(c, nodeName, taint) + RemoveTaintOffNode(ctx, c, nodeName, taint) } } // RemoveTaintOffNode removes the given taint from the given node. -func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) { - err := removeNodeTaint(c, nodeName, nil, &taint) +func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName string, taint v1.Taint) { + err := removeNodeTaint(ctx, c, nodeName, nil, &taint) // TODO use wrapper methods in expect.go after removing core e2e dependency on node gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred()) - verifyThatTaintIsGone(c, nodeName, &taint) + verifyThatTaintIsGone(ctx, c, nodeName, &taint) } // AddOrUpdateTaintOnNode adds the given taint to the given node or updates taint. -func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) { +func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taint v1.Taint) { // TODO use wrapper methods in expect.go after removing the dependency on this // package from the core e2e framework. - err := addOrUpdateTaintOnNode(c, nodeName, &taint) + err := addOrUpdateTaintOnNode(ctx, c, nodeName, &taint) gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred()) } // addOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls // to update nodes; otherwise, no API calls. Return error if any. // copied from pkg/controller/controller_utils.go AddOrUpdateTaintOnNode() -func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error { +func addOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taints ...*v1.Taint) error { if len(taints) == 0 { return nil } @@ -637,10 +637,10 @@ func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. 
if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -661,7 +661,7 @@ func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v if !updated { return nil } - return patchNodeTaints(c, nodeName, oldNode, newNode) + return patchNodeTaints(ctx, c, nodeName, oldNode, newNode) }) } @@ -724,7 +724,7 @@ var semantic = conversion.EqualitiesOrDie( // won't fail if target taint doesn't exist or has been removed. // If passed a node it'll check if there's anything to be done, if taint is not present it won't issue // any API calls. -func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error { +func removeNodeTaint(ctx context.Context, c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error { if len(taints) == 0 { return nil } @@ -749,10 +749,10 @@ func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, tain // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -773,12 +773,12 @@ func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, tain if !updated { return nil } - return patchNodeTaints(c, nodeName, oldNode, newNode) + return patchNodeTaints(ctx, c, nodeName, oldNode, newNode) }) } // patchNodeTaints patches node's taints. 
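A sketch of the intended call pattern for the taint helpers above, inside a spec like the previous sketch (f is that suite's *framework.Framework, v1 is "k8s.io/api/core/v1", and the taint key is made up). The taint is added with the spec context, and its removal is registered as cleanup so it runs even if the spec is aborted.

ginkgo.It("taints and untaints a node (sketch)", func(ctx context.Context) {
    taint := v1.Taint{
        Key:    "example.com/e2e-sketch", // hypothetical key
        Value:  "true",
        Effect: v1.TaintEffectNoSchedule,
    }

    node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
    framework.ExpectNoError(err)

    e2enode.AddOrUpdateTaintOnNode(ctx, f.ClientSet, node.Name, taint)
    // DeferCleanup supplies its own cleanup context to the helper when it runs.
    ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, node.Name, taint)

    e2enode.ExpectNodeHasTaint(ctx, f.ClientSet, node.Name, &taint)
})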
-func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error { +func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error { oldData, err := json.Marshal(oldNode) if err != nil { return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err) @@ -797,7 +797,7 @@ func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } @@ -833,9 +833,9 @@ func deleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) return newTaints, deleted } -func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) { +func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) { ginkgo.By("verifying the node doesn't have the taint " + taint.ToString()) - nodeUpdated, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + nodeUpdated, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) // TODO use wrapper methods in expect.go after removing core e2e dependency on node gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred()) diff --git a/test/e2e/framework/node/ssh.go b/test/e2e/framework/node/ssh.go index 414d6ff5b12..2e0b02c0fe3 100644 --- a/test/e2e/framework/node/ssh.go +++ b/test/e2e/framework/node/ssh.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "time" "k8s.io/apimachinery/pkg/util/wait" @@ -25,7 +26,7 @@ import ( ) // WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod. -func WaitForSSHTunnels(namespace string) { +func WaitForSSHTunnels(ctx context.Context, namespace string) { framework.Logf("Waiting for SSH tunnels to establish") e2ekubectl.RunKubectl(namespace, "run", "ssh-tunnel-test", "--image=busybox", @@ -35,7 +36,7 @@ func WaitForSSHTunnels(namespace string) { defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test") // allow up to a minute for new ssh tunnels to establish - wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { + wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (bool, error) { _, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test") return err == nil, nil }) diff --git a/test/e2e/framework/node/wait.go b/test/e2e/framework/node/wait.go index a3e32406229..60b79cd6477 100644 --- a/test/e2e/framework/node/wait.go +++ b/test/e2e/framework/node/wait.go @@ -40,21 +40,21 @@ var requiredPerNodePods = []*regexp.Regexp{ // WaitForReadyNodes waits up to timeout for cluster to has desired size and // there is no not-ready nodes in it. By cluster size we mean number of schedulable Nodes. -func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error { - _, err := CheckReady(c, size, timeout) +func WaitForReadyNodes(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) error { + _, err := CheckReady(ctx, c, size, timeout) return err } // WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them. 
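The polling conversions above all follow the same mechanical pattern: wait.PollImmediate becomes wait.PollImmediateWithContext, and context.TODO() becomes the ctx that the poll callback receives. A sketch on a made-up helper, assuming the usual imports (context, time, apierrors "k8s.io/apimachinery/pkg/api/errors", metav1, wait, clientset):

// Before (keeps polling until timeout, even if the test was aborted):
//
//    err := wait.PollImmediate(2*time.Second, time.Minute, func() (bool, error) {
//        _, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
//        return err == nil, nil
//    })
//
// After (stops as soon as ctx is cancelled, and the API call is cancelled too):
func waitForPodVisible(ctx context.Context, c clientset.Interface, ns, name string) error {
    return wait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
        _, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            return false, nil // not there yet, keep polling
        }
        return err == nil, err
    })
}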
-func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error { +func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout time.Duration) error { framework.Logf("Waiting up to %v for all nodes to be ready", timeout) var notReady []v1.Node var missingPodsPerNode map[string][]string - err := wait.PollImmediate(poll, timeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -63,7 +63,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error { notReady = append(notReady, node) } } - pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -114,10 +114,10 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error { // within timeout. If wantTrue is true, it will ensure the node condition status // is ConditionTrue; if it's false, it ensures the node condition is in any state // other than ConditionTrue (e.g. not true or unknown). -func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool { +func WaitConditionToBe(ctx context.Context, c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool { framework.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue) for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}) if err != nil { framework.Logf("Couldn't get node %s", name) continue @@ -134,20 +134,20 @@ func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.Node // WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the // readiness condition is anything but ready, e.g false or unknown) within // timeout. -func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool { - return WaitConditionToBe(c, name, v1.NodeReady, false, timeout) +func WaitForNodeToBeNotReady(ctx context.Context, c clientset.Interface, name string, timeout time.Duration) bool { + return WaitConditionToBe(ctx, c, name, v1.NodeReady, false, timeout) } // WaitForNodeToBeReady returns whether node name is ready within timeout. -func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool { - return WaitConditionToBe(c, name, v1.NodeReady, true, timeout) +func WaitForNodeToBeReady(ctx context.Context, c clientset.Interface, name string, timeout time.Duration) bool { + return WaitConditionToBe(ctx, c, name, v1.NodeReady, true, timeout) } // CheckReady waits up to timeout for cluster to has desired size and // there is no not-ready nodes in it. By cluster size we mean number of schedulable Nodes. 
-func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) { +func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) { - nodes, err := waitListSchedulableNodes(c) + nodes, err := waitListSchedulableNodes(ctx, c) if err != nil { framework.Logf("Failed to list nodes: %v", err) continue @@ -172,11 +172,11 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No } // waitListSchedulableNodes is a wrapper around listing nodes supporting retries. -func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { +func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) { var nodes *v1.NodeList var err error - if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) { - nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ + if wait.PollImmediateWithContext(ctx, poll, singleCallTimeout, func(ctx context.Context) (bool, error) { + nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -190,8 +190,8 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { } // checkWaitListSchedulableNodes is a wrapper around listing nodes supporting retries. -func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { - nodes, err := waitListSchedulableNodes(c) +func checkWaitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) { + nodes, err := waitListSchedulableNodes(ctx, c) if err != nil { return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for e2e cluster", err) } @@ -199,9 +199,9 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) } // CheckReadyForTests returns a function which will return 'true' once the number of ready nodes is above the allowedNotReadyNodes threshold (i.e. to be used as a global gate for starting the tests). -func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) { +func CheckReadyForTests(ctx context.Context, c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func(ctx context.Context) (bool, error) { attempt := 0 - return func() (bool, error) { + return func(ctx context.Context) (bool, error) { if allowedNotReadyNodes == -1 { return true, nil } @@ -212,7 +212,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed // remove uncordoned nodes from our calculation, TODO refactor if node v2 API removes that semantic. FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(), } - allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts) + allNodes, err := c.CoreV1().Nodes().List(ctx, opts) if err != nil { var terminalListNodesErr error framework.Logf("Unexpected error listing nodes: %v", err) diff --git a/test/e2e/framework/node/wait_test.go b/test/e2e/framework/node/wait_test.go index 3b3373de891..f917d40d6d2 100644 --- a/test/e2e/framework/node/wait_test.go +++ b/test/e2e/framework/node/wait_test.go @@ -17,6 +17,7 @@ limitations under the License. 
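Since CheckReadyForTests now returns a func(ctx) (bool, error), it can be handed directly to the context-aware poll helpers. A sketch of a hypothetical caller (the threshold and timeouts are made up, and the TestContext field names are assumptions, not taken from this diff):

func waitForTestGate(ctx context.Context, c clientset.Interface) error {
    ready := e2enode.CheckReadyForTests(ctx, c,
        framework.TestContext.NonblockingTaints,
        framework.TestContext.AllowedNotReadyNodes,
        50 /* hypothetical large-cluster threshold */)
    // The returned function matches wait.ConditionWithContextFunc, so it plugs in directly.
    return wait.PollImmediateWithContext(ctx, 30*time.Second, 10*time.Minute, ready)
}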
package node import ( + "context" "errors" "testing" @@ -167,12 +168,12 @@ func TestCheckReadyForTests(t *testing.T) { nodeList := &v1.NodeList{Items: tc.nodes} return true, nodeList, tc.nodeListErr }) - checkFunc := CheckReadyForTests(c, tc.nonblockingTaints, tc.allowedNotReadyNodes, testLargeClusterThreshold) + checkFunc := CheckReadyForTests(context.Background(), c, tc.nonblockingTaints, tc.allowedNotReadyNodes, testLargeClusterThreshold) // The check function returns "false, nil" during its // first two calls, therefore we have to try several // times until we get the expected error. for attempt := 0; attempt <= 3; attempt++ { - out, err := checkFunc() + out, err := checkFunc(context.Background()) expected := tc.expected expectedErr := tc.expectedErr if tc.nodeListErr != nil && attempt < 2 { diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go index fe817be8c55..168724def55 100644 --- a/test/e2e/framework/pod/create.go +++ b/test/e2e/framework/pod/create.go @@ -52,19 +52,19 @@ type Config struct { } // CreateUnschedulablePod with given claims based on node selector -func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) { +func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) { pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } // Waiting for pod to become Unschedulable - err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace) + err = WaitForPodNameUnschedulableInNamespace(ctx, client, pod.Name, namespace) if err != nil { return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err) } // get fresh pod info - pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return pod, fmt.Errorf("pod Get API error: %v", err) } @@ -72,24 +72,24 @@ func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSe } // CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed. 
-func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) { - return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "") +func CreateClientPod(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) { + return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "") } // CreatePod with given claims based on node selector -func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) { +func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) { pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } // Waiting for pod to be running - err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace) + err = WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace) if err != nil { return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) } // get fresh pod info - pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return pod, fmt.Errorf("pod Get API error: %v", err) } @@ -97,29 +97,29 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st } // CreateSecPod creates security pod with given claims -func CreateSecPod(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) { - return CreateSecPodWithNodeSelection(client, podConfig, timeout) +func CreateSecPod(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) { + return CreateSecPodWithNodeSelection(ctx, client, podConfig, timeout) } // CreateSecPodWithNodeSelection creates security pod with given claims -func CreateSecPodWithNodeSelection(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) { +func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) { pod, err := MakeSecPod(podConfig) if err != nil { return nil, fmt.Errorf("Unable to create pod: %v", err) } - pod, err = client.CoreV1().Pods(podConfig.NS).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = client.CoreV1().Pods(podConfig.NS).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } // Waiting for pod to be running - err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, podConfig.NS, timeout) + err = WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, podConfig.NS, timeout) if err != nil { return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) } // get fresh pod info - pod, err = client.CoreV1().Pods(podConfig.NS).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(podConfig.NS).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return pod, fmt.Errorf("pod Get API error: %v", err) } diff --git 
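A sketch showing the create and delete helpers sharing one spec context (f and pvc come from the surrounding suite; e2epod is "k8s.io/kubernetes/test/e2e/framework/pod"; the spec is hypothetical). Registering the context-aware delete with DeferCleanup keeps teardown interruptible as well.

ginkgo.It("creates a client pod and tears it down (sketch)", func(ctx context.Context) {
    pod, err := e2epod.CreateClientPod(ctx, f.ClientSet, f.Namespace.Name, pvc)
    framework.ExpectNoError(err, "creating client pod")

    // DeletePodWithWait(ctx, c, pod) runs with the cleanup context Ginkgo provides.
    ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)

    // ... exercise the pod here ...
})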
a/test/e2e/framework/pod/delete.go b/test/e2e/framework/pod/delete.go index 07e4237e0bf..f25e6fad120 100644 --- a/test/e2e/framework/pod/delete.go +++ b/test/e2e/framework/pod/delete.go @@ -37,9 +37,9 @@ const ( // DeletePodOrFail deletes the pod of the specified namespace and name. Resilient to the pod // not existing. -func DeletePodOrFail(c clientset.Interface, ns, name string) { +func DeletePodOrFail(ctx context.Context, c clientset.Interface, ns, name string) { ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) - err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{}) if err != nil && apierrors.IsNotFound(err) { return } @@ -49,18 +49,18 @@ func DeletePodOrFail(c clientset.Interface, ns, name string) { // DeletePodWithWait deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod // not existing. -func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error { +func DeletePodWithWait(ctx context.Context, c clientset.Interface, pod *v1.Pod) error { if pod == nil { return nil } - return DeletePodWithWaitByName(c, pod.GetName(), pod.GetNamespace()) + return DeletePodWithWaitByName(ctx, c, pod.GetName(), pod.GetNamespace()) } // DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod // not existing. -func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error { +func DeletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName, podNamespace string) error { framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace) - err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{}) + err := c.CoreV1().Pods(podNamespace).Delete(ctx, podName, metav1.DeleteOptions{}) if err != nil { if apierrors.IsNotFound(err) { return nil // assume pod was already deleted @@ -68,7 +68,7 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string return fmt.Errorf("pod Delete API error: %v", err) } framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName) - err = WaitForPodNotFoundInNamespace(c, podName, podNamespace, PodDeleteTimeout) + err = WaitForPodNotFoundInNamespace(ctx, c, podName, podNamespace, PodDeleteTimeout) if err != nil { return fmt.Errorf("pod %q was not deleted: %v", podName, err) } @@ -76,14 +76,14 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string } // DeletePodWithGracePeriod deletes the passed-in pod. Resilient to the pod not existing. -func DeletePodWithGracePeriod(c clientset.Interface, pod *v1.Pod, grace int64) error { - return DeletePodWithGracePeriodByName(c, pod.GetName(), pod.GetNamespace(), grace) +func DeletePodWithGracePeriod(ctx context.Context, c clientset.Interface, pod *v1.Pod, grace int64) error { + return DeletePodWithGracePeriodByName(ctx, c, pod.GetName(), pod.GetNamespace(), grace) } // DeletePodsWithGracePeriod deletes the passed-in pods. Resilient to the pods not existing. 
-func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64) error { +func DeletePodsWithGracePeriod(ctx context.Context, c clientset.Interface, pods []v1.Pod, grace int64) error { for _, pod := range pods { - if err := DeletePodWithGracePeriod(c, &pod, grace); err != nil { + if err := DeletePodWithGracePeriod(ctx, c, &pod, grace); err != nil { return err } } @@ -91,9 +91,9 @@ func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64 } // DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing. -func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error { +func DeletePodWithGracePeriodByName(ctx context.Context, c clientset.Interface, podName, podNamespace string, grace int64) error { framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace) - err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace)) + err := c.CoreV1().Pods(podNamespace).Delete(ctx, podName, *metav1.NewDeleteOptions(grace)) if err != nil { if apierrors.IsNotFound(err) { return nil // assume pod was already deleted diff --git a/test/e2e/framework/pod/exec_util.go b/test/e2e/framework/pod/exec_util.go index a88aee2d7dd..e2e00de8a41 100644 --- a/test/e2e/framework/pod/exec_util.go +++ b/test/e2e/framework/pod/exec_util.go @@ -87,6 +87,7 @@ func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, strin // ExecCommandInContainerWithFullOutput executes a command in the // specified container and return stdout, stderr and error func ExecCommandInContainerWithFullOutput(f *framework.Framework, podName, containerName string, cmd ...string) (string, string, error) { + // TODO (pohly): add context support return ExecWithOptions(f, ExecOptions{ Command: cmd, Namespace: f.Namespace.Name, @@ -114,28 +115,28 @@ func ExecShellInContainer(f *framework.Framework, podName, containerName string, return ExecCommandInContainer(f, podName, containerName, "/bin/sh", "-c", cmd) } -func execCommandInPod(f *framework.Framework, podName string, cmd ...string) string { - pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{}) +func execCommandInPod(ctx context.Context, f *framework.Framework, podName string, cmd ...string) string { + pod, err := NewPodClient(f).Get(ctx, podName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod %v", podName) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) return ExecCommandInContainer(f, podName, pod.Spec.Containers[0].Name, cmd...) } -func execCommandInPodWithFullOutput(f *framework.Framework, podName string, cmd ...string) (string, string, error) { - pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{}) +func execCommandInPodWithFullOutput(ctx context.Context, f *framework.Framework, podName string, cmd ...string) (string, string, error) { + pod, err := NewPodClient(f).Get(ctx, podName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod %v", podName) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) return ExecCommandInContainerWithFullOutput(f, podName, pod.Spec.Containers[0].Name, cmd...) } // ExecShellInPod executes the specified command on the pod. 
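A sketch of the exec helpers after the conversion, again inside a context-aware spec (the pod name is hypothetical and assumed to have been created earlier in the test):

ginkgo.It("runs commands in an existing pod (sketch)", func(ctx context.Context) {
    out := e2epod.ExecShellInPod(ctx, f, "busybox-host", "cat /etc/hostname")
    framework.Logf("hostname reported by pod: %s", out)

    stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, "busybox-host", "ls /nonexistent")
    framework.Logf("stdout=%q stderr=%q err=%v", stdout, stderr, err)
})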
-func ExecShellInPod(f *framework.Framework, podName string, cmd string) string { - return execCommandInPod(f, podName, "/bin/sh", "-c", cmd) +func ExecShellInPod(ctx context.Context, f *framework.Framework, podName string, cmd string) string { + return execCommandInPod(ctx, f, podName, "/bin/sh", "-c", cmd) } // ExecShellInPodWithFullOutput executes the specified command on the Pod and returns stdout, stderr and error. -func ExecShellInPodWithFullOutput(f *framework.Framework, podName string, cmd string) (string, string, error) { - return execCommandInPodWithFullOutput(f, podName, "/bin/sh", "-c", cmd) +func ExecShellInPodWithFullOutput(ctx context.Context, f *framework.Framework, podName string, cmd string) (string, string, error) { + return execCommandInPodWithFullOutput(ctx, f, podName, "/bin/sh", "-c", cmd) } func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error { diff --git a/test/e2e/framework/pod/output/output.go b/test/e2e/framework/pod/output/output.go index 856d99678be..5a2e762933f 100644 --- a/test/e2e/framework/pod/output/output.go +++ b/test/e2e/framework/pod/output/output.go @@ -128,8 +128,8 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err } // DumpDebugInfo dumps debug info of tests. -func DumpDebugInfo(c clientset.Interface, ns string) { - sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) +func DumpDebugInfo(ctx context.Context, c clientset.Interface, ns string) { + sl, _ := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) for _, s := range sl.Items { desc, _ := e2ekubectl.RunKubectl(ns, "describe", "po", s.Name) framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc) @@ -142,6 +142,7 @@ func DumpDebugInfo(c clientset.Interface, ns string) { // MatchContainerOutput creates a pod and waits for all it's containers to exit with success. // It then tests that the matcher with each expectedOutput matches the output of the specified container. func MatchContainerOutput( + ctx context.Context, f *framework.Framework, pod *v1.Pod, containerName string, @@ -153,17 +154,17 @@ func MatchContainerOutput( } podClient := e2epod.PodClientNS(f, ns) - createdPod := podClient.Create(pod) + createdPod := podClient.Create(ctx, pod) defer func() { ginkgo.By("delete the pod") - podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) }() // Wait for client pod to complete. - podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart) + podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart) // Grab its logs. Get host first. - podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{}) + podStatus, err := podClient.Get(ctx, createdPod.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to get pod status: %v", err) } @@ -171,7 +172,7 @@ func MatchContainerOutput( if podErr != nil { // Pod failed. 
Dump all logs from all containers to see what's wrong _ = apiv1pod.VisitContainers(&podStatus.Spec, apiv1pod.AllFeatureEnabledContainers(), func(c *v1.Container, containerType apiv1pod.ContainerType) bool { - logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, c.Name) if err != nil { framework.Logf("Failed to get logs from node %q pod %q container %q: %v", podStatus.Spec.NodeName, podStatus.Name, c.Name, err) @@ -187,7 +188,7 @@ func MatchContainerOutput( podStatus.Spec.NodeName, podStatus.Name, containerName, err) // Sometimes the actual containers take a second to get started, try to get logs for 60s - logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, containerName) if err != nil { framework.Logf("Failed to get logs from node %q pod %q container %q. %v", podStatus.Spec.NodeName, podStatus.Name, containerName, err) @@ -210,21 +211,21 @@ func MatchContainerOutput( // TestContainerOutput runs the given pod in the given namespace and waits // for all of the containers in the podSpec to move into the 'Success' status, and tests // the specified container log against the given expected output using a substring matcher. -func TestContainerOutput(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) { - TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring) +func TestContainerOutput(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) { + TestContainerOutputMatcher(ctx, f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring) } // TestContainerOutputRegexp runs the given pod in the given namespace and waits // for all of the containers in the podSpec to move into the 'Success' status, and tests // the specified container log against the given expected output using a regexp matcher. -func TestContainerOutputRegexp(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) { - TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp) +func TestContainerOutputRegexp(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) { + TestContainerOutputMatcher(ctx, f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp) } // TestContainerOutputMatcher runs the given pod in the given namespace and waits // for all of the containers in the podSpec to move into the 'Success' status, and tests // the specified container log against the given expected output using the given matcher. 
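A sketch of TestContainerOutput after the change, where the matcher inherits the spec context for pod creation, waiting, and log collection. newEchoPod is a hypothetical builder returning a *v1.Pod whose first container prints a known string, and e2eoutput is assumed to alias "k8s.io/kubernetes/test/e2e/framework/pod/output".

ginkgo.It("matches container output (sketch)", func(ctx context.Context) {
    pod := newEchoPod(f.Namespace.Name, "hello from e2e") // hypothetical helper
    e2eoutput.TestContainerOutput(ctx, f, "echo scenario", pod, 0, []string{"hello from e2e"})
})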
-func TestContainerOutputMatcher(f *framework.Framework, +func TestContainerOutputMatcher(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, @@ -234,5 +235,5 @@ func TestContainerOutputMatcher(f *framework.Framework, if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) { framework.Failf("Invalid container index: %d", containerIndex) } - framework.ExpectNoError(MatchContainerOutput(f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher)) + framework.ExpectNoError(MatchContainerOutput(ctx, f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher)) } diff --git a/test/e2e/framework/pod/pod_client.go b/test/e2e/framework/pod/pod_client.go index 54b52dd2351..54937df8313 100644 --- a/test/e2e/framework/pod/pod_client.go +++ b/test/e2e/framework/pod/pod_client.go @@ -93,26 +93,26 @@ type PodClient struct { } // Create creates a new pod according to the framework specifications (don't wait for it to start). -func (c *PodClient) Create(pod *v1.Pod) *v1.Pod { +func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod { c.mungeSpec(pod) - p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{}) + p, err := c.PodInterface.Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Pod") return p } // CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready. -func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod { +func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod { namespace := c.f.Namespace.Name - p := c.Create(pod) - framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout)) + p := c.Create(ctx, pod) + framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout)) // Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip. - p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{}) + p, err := c.Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return p } // CreateBatch create a batch of pods. All pods are created before waiting. -func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod { +func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod { ps := make([]*v1.Pod, len(pods)) var wg sync.WaitGroup for i, pod := range pods { @@ -120,7 +120,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod { go func(i int, pod *v1.Pod) { defer wg.Done() defer ginkgo.GinkgoRecover() - ps[i] = c.CreateSync(pod) + ps[i] = c.CreateSync(ctx, pod) }(i, pod) } wg.Wait() @@ -130,14 +130,14 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod { // Update updates the pod object. It retries if there is a conflict, throw out error if // there is any other apierrors. name is the pod name, updateFn is the function updating the // pod object. 
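A sketch of the PodClient methods above with the spec context threaded through create, update, and delete (newTestPod is a hypothetical pod builder; v1 and metav1 are the usual API imports):

ginkgo.It("creates and mutates a pod through PodClient (sketch)", func(ctx context.Context) {
    podClient := e2epod.NewPodClient(f)
    pod := podClient.CreateSync(ctx, newTestPod(f.Namespace.Name)) // hypothetical builder

    podClient.Update(ctx, pod.Name, func(p *v1.Pod) {
        if p.Labels == nil {
            p.Labels = map[string]string{}
        }
        p.Labels["example.com/updated"] = "true" // hypothetical label
    })

    podClient.DeleteSync(ctx, pod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
})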
-func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { - framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { - pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{}) +func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *v1.Pod)) { + framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) { + pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get pod %q: %v", name, err) } updateFn(pod) - _, err = c.PodInterface.Update(context.TODO(), pod, metav1.UpdateOptions{}) + _, err = c.PodInterface.Update(ctx, pod, metav1.UpdateOptions{}) if err == nil { framework.Logf("Successfully updated pod %q", name) return true, nil @@ -151,7 +151,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { } // AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running. -func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error { +func (c *PodClient) AddEphemeralContainerSync(ctx context.Context, pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error { namespace := c.f.Namespace.Name podJS, err := json.Marshal(pod) @@ -166,23 +166,23 @@ func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralConta framework.ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod)) // Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled. - if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil { + if _, err := c.Patch(ctx, pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil { return err } - framework.ExpectNoError(WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout)) + framework.ExpectNoError(WaitForContainerRunning(ctx, c.f.ClientSet, namespace, pod.Name, ec.Name, timeout)) return nil } // DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't // disappear before the timeout, it will fail the test. -func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) { +func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.DeleteOptions, timeout time.Duration) { namespace := c.f.Namespace.Name - err := c.Delete(context.TODO(), name, options) + err := c.Delete(ctx, name, options) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Failed to delete pod %q: %v", name, err) } - gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(), + gomega.Expect(WaitForPodToDisappear(ctx, c.f.ClientSet, namespace, name, labels.Everything(), 2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name) } @@ -224,9 +224,9 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) { // WaitForSuccess waits for pod to succeed. 
// TODO(random-liu): Move pod wait function into this file -func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) { +func (c *PodClient) WaitForSuccess(ctx context.Context, name string, timeout time.Duration) { f := c.f - gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, + gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed: @@ -241,9 +241,9 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) { } // WaitForFinish waits for pod to finish running, regardless of success or failure. -func (c *PodClient) WaitForFinish(name string, timeout time.Duration) { +func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time.Duration) { f := c.f - gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, + gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed: @@ -258,9 +258,9 @@ func (c *PodClient) WaitForFinish(name string, timeout time.Duration) { } // WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod. -func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) { +func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod) (*v1.Event, error) { var ev *v1.Event - err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) { + err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) { evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) @@ -282,9 +282,9 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) { } // MatchContainerOutput gets output of a container and match expected regexp in the output. -func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error { +func (c *PodClient) MatchContainerOutput(ctx context.Context, name string, containerName string, expectedRegexp string) error { f := c.f - output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName) + output, err := GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, name, containerName) if err != nil { return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name) } @@ -299,16 +299,16 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe } // PodIsReady returns true if the specified pod is ready. Otherwise false. 
-func (c *PodClient) PodIsReady(name string) bool { - pod, err := c.Get(context.TODO(), name, metav1.GetOptions{}) +func (c *PodClient) PodIsReady(ctx context.Context, name string) bool { + pod, err := c.Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err) return podutils.IsPodReady(pod) } // RemovePodFinalizer removes the pod's finalizer -func (c *PodClient) RemoveFinalizer(podName string, finalizerName string) { +func (c *PodClient) RemoveFinalizer(ctx context.Context, podName string, finalizerName string) { framework.Logf("Removing pod's %q finalizer: %q", podName, finalizerName) - c.Update(podName, func(pod *v1.Pod) { + c.Update(ctx, podName, func(pod *v1.Pod) { pod.ObjectMeta.Finalizers = slice.RemoveString(pod.ObjectMeta.Finalizers, finalizerName, nil) }) } diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go index 64c3447f8e6..8c5b3b268ff 100644 --- a/test/e2e/framework/pod/resource.go +++ b/test/e2e/framework/pod/resource.go @@ -96,10 +96,10 @@ func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Sele // CheckAllResponses issues GETs to all pods in the context and verify they // reply with their own pod name. -func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) { +func (r ProxyResponseChecker) CheckAllResponses(ctx context.Context) (done bool, err error) { successes := 0 options := metav1.ListOptions{LabelSelector: r.label.String()} - currentPods, err := r.c.CoreV1().Pods(r.ns).List(context.TODO(), options) + currentPods, err := r.c.CoreV1().Pods(r.ns).List(ctx, options) expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns) for i, pod := range r.pods.Items { // Check that the replica list remains unchanged, otherwise we have problems. @@ -107,7 +107,7 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) { return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods) } - ctx, cancel := context.WithTimeout(context.Background(), singleCallTimeout) + ctxUntil, cancel := context.WithTimeout(ctx, singleCallTimeout) defer cancel() body, err := r.c.CoreV1().RESTClient().Get(). @@ -115,11 +115,11 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) { Resource("pods"). SubResource("proxy"). Name(string(pod.Name)). - Do(ctx). + Do(ctxUntil). Raw() if err != nil { - if ctx.Err() != nil { + if ctxUntil.Err() != nil { // We may encounter errors here because of a race between the pod readiness and apiserver // proxy. So, we log the error and retry if this occurs. framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status) @@ -159,19 +159,19 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) { } // PodsCreated returns a pod list matched by the given name. -func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) { +func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - return PodsCreatedByLabel(c, ns, name, replicas, label) + return PodsCreatedByLabel(ctx, c, ns, name, replicas, label) } // PodsCreatedByLabel returns a created pod list matched by the given label. 
-func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) { +func PodsCreatedByLabel(ctx context.Context, c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) { timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { options := metav1.ListOptions{LabelSelector: label.String()} // List the pods, making sure we observe all the replicas. - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) + pods, err := c.CoreV1().Pods(ns).List(ctx, options) if err != nil { return nil, err } @@ -194,26 +194,26 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, } // VerifyPods checks if the specified pod is responding. -func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { - return podRunningMaybeResponding(c, ns, name, wantName, replicas, true) +func VerifyPods(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error { + return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, true) } // VerifyPodsRunning checks if the specified pod is running. -func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { - return podRunningMaybeResponding(c, ns, name, wantName, replicas, false) +func VerifyPodsRunning(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error { + return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, false) } -func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error { - pods, err := PodsCreated(c, ns, name, replicas) +func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error { + pods, err := PodsCreated(ctx, c, ns, name, replicas) if err != nil { return err } - e := podsRunning(c, pods) + e := podsRunning(ctx, c, pods) if len(e) > 0 { return fmt.Errorf("failed to wait for pods running: %v", e) } if checkResponding { - err = PodsResponding(c, ns, name, wantName, pods) + err = PodsResponding(ctx, c, ns, name, wantName, pods) if err != nil { return fmt.Errorf("failed to wait for pods responding: %v", err) } @@ -221,7 +221,7 @@ func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName return nil } -func podsRunning(c clientset.Interface, pods *v1.PodList) []error { +func podsRunning(ctx context.Context, c clientset.Interface, pods *v1.PodList) []error { // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. ginkgo.By("ensuring each pod is running") @@ -230,7 +230,7 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error { for _, pod := range pods.Items { go func(p v1.Pod) { - errorChan <- WaitForPodRunningInNamespace(c, &p) + errorChan <- WaitForPodRunningInNamespace(ctx, c, &p) }(pod) } @@ -302,7 +302,7 @@ func logPodTerminationMessages(pods []v1.Pod) { // We will log the Pods that have the LabelLogOnPodFailure label. If there aren't any, we default to // logging only the first 5 Pods. 
This requires the reportDir to be set, and the pods are logged into: // {report_dir}/pods/{namespace}/{pod}/{container_name}/logs.txt -func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) { +func logPodLogs(ctx context.Context, c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) { if reportDir == "" { return } @@ -328,7 +328,7 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi for i := 0; i < maxPods; i++ { pod := logPods[i] for _, container := range pod.Spec.Containers { - logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen) + logs, err := getPodLogsInternal(ctx, c, namespace, pod.Name, container.Name, false, nil, &tailLen) if err != nil { framework.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err) continue @@ -351,14 +351,14 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi } // DumpAllPodInfoForNamespace logs all pod information for a given namespace. -func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) { - pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) +func DumpAllPodInfoForNamespace(ctx context.Context, c clientset.Interface, namespace, reportDir string) { + pods, err := c.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("unable to fetch pod debug info: %v", err) } LogPodStates(pods.Items) logPodTerminationMessages(pods.Items) - logPodLogs(c, namespace, pods.Items, reportDir) + logPodLogs(ctx, c, namespace, pods.Items, reportDir) } // FilterNonRestartablePods filters out pods that will never get recreated if @@ -459,15 +459,15 @@ func newExecPodSpec(ns, generateName string) *v1.Pod { // CreateExecPodOrFail creates a agnhost pause pod used as a vessel for kubectl exec commands. // Pod name is uniquely generated. -func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod { +func CreateExecPodOrFail(ctx context.Context, client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod { framework.Logf("Creating new exec pod") pod := newExecPodSpec(ns, generateName) if tweak != nil { tweak(pod) } - execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + execPod, err := client.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) expectNoError(err, "failed to create new exec pod in namespace: %s", ns) - err = WaitForPodNameRunningInNamespace(client, execPod.Name, execPod.Namespace) + err = WaitForPodNameRunningInNamespace(ctx, client, execPod.Name, execPod.Namespace) expectNoError(err, "failed to create new exec pod in namespace: %s", ns) return execPod } @@ -497,20 +497,20 @@ func WithWindowsHostProcess(pod *v1.Pod, username string) { // CheckPodsRunningReady returns whether all pods whose names are listed in // podNames in namespace ns are running and ready, using c and waiting at most // timeout. 
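One loop in this file, PodsCreatedByLabel above, still sleeps unconditionally between List calls, so cancellation is only noticed once per 5-second nap. A cancellation-aware variant would select on the context as well; a minimal sketch, not part of the patch (pollUntil and listOnce are made-up names):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func pollUntil(ctx context.Context, interval, timeout time.Duration, listOnce func(context.Context) (bool, error)) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		done, err := listOnce(ctx)
		if err != nil || done {
			return err
		}
		select {
		case <-ctx.Done():
			// Stops promptly when the test is aborted, instead of finishing
			// the current sleep first.
			return ctx.Err()
		case <-ticker.C:
		}
	}
}

func main() {
	err := pollUntil(context.Background(), 10*time.Millisecond, 50*time.Millisecond,
		func(ctx context.Context) (bool, error) { return false, nil })
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}

The apimachinery wait helpers used elsewhere in this patch already stop waiting when the context is cancelled.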
-func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { - return checkPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready") +func CheckPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { + return checkPodsCondition(ctx, c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready") } // CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are // listed in podNames in namespace ns are running and ready, or succeeded; use // c and waiting at most timeout. -func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { - return checkPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded") +func CheckPodsRunningReadyOrSucceeded(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { + return checkPodsCondition(ctx, c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded") } // checkPodsCondition returns whether all pods whose names are listed in podNames // in namespace ns are in the condition, using c and waiting at most timeout. -func checkPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool { +func checkPodsCondition(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool { np := len(podNames) framework.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames) type waitPodResult struct { @@ -521,7 +521,7 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim for _, podName := range podNames { // Launch off pod readiness checkers. go func(name string) { - err := WaitForPodCondition(c, ns, name, desc, timeout, condition) + err := WaitForPodCondition(ctx, c, ns, name, desc, timeout, condition) result <- waitPodResult{err == nil, name} }(podName) } @@ -539,24 +539,24 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim } // GetPodLogs returns the logs of the specified container (namespace/pod/container). -func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { - return getPodLogsInternal(c, namespace, podName, containerName, false, nil, nil) +func GetPodLogs(ctx context.Context, c clientset.Interface, namespace, podName, containerName string) (string, error) { + return getPodLogsInternal(ctx, c, namespace, podName, containerName, false, nil, nil) } // GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp. -func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) { +func GetPodLogsSince(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) { sinceTime := metav1.NewTime(since) - return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime, nil) + return getPodLogsInternal(ctx, c, namespace, podName, containerName, false, &sinceTime, nil) } // GetPreviousPodLogs returns the logs of the previous instance of the // specified container (namespace/pod/container). 
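checkPodsCondition above fans the readiness checks out to one goroutine per pod; because they all receive the same ctx, a single cancellation releases every checker instead of leaving them blocked until their individual timeouts. A self-contained illustration with a toy waitReady standing in for WaitForPodCondition:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func waitReady(ctx context.Context, name string) error {
	select {
	case <-time.After(10 * time.Second): // pretend this pod never becomes ready
		return nil
	case <-ctx.Done():
		return fmt.Errorf("%s: %w", name, ctx.Err())
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	results := make(chan error, 3)
	for _, name := range []string{"pod-a", "pod-b", "pod-c"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			results <- waitReady(ctx, name)
		}(name)
	}
	cancel() // aborting the test releases every checker at once
	wg.Wait()
	close(results)
	for err := range results {
		fmt.Println(err)
	}
}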
-func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { - return getPodLogsInternal(c, namespace, podName, containerName, true, nil, nil) +func GetPreviousPodLogs(ctx context.Context, c clientset.Interface, namespace, podName, containerName string) (string, error) { + return getPodLogsInternal(ctx, c, namespace, podName, containerName, true, nil, nil) } // utility function for gomega Eventually -func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) { +func getPodLogsInternal(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) { request := c.CoreV1().RESTClient().Get(). Resource("pods"). Namespace(namespace). @@ -569,7 +569,7 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName if tailLines != nil { request.Param("tailLines", strconv.Itoa(*tailLines)) } - logs, err := request.Do(context.TODO()).Raw() + logs, err := request.Do(ctx).Raw() if err != nil { return "", err } @@ -580,8 +580,8 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName } // GetPodsInNamespace returns the pods in the given namespace. -func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) { - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) +func GetPodsInNamespace(ctx context.Context, c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) { + pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { return []*v1.Pod{}, err } @@ -598,10 +598,10 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin } // GetPods return the label matched pods in the given ns -func GetPods(c clientset.Interface, ns string, matchLabels map[string]string) ([]v1.Pod, error) { +func GetPods(ctx context.Context, c clientset.Interface, ns string, matchLabels map[string]string) ([]v1.Pod, error) { label := labels.SelectorFromSet(matchLabels) listOpts := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts) + pods, err := c.CoreV1().Pods(ns).List(ctx, listOpts) if err != nil { return []v1.Pod{}, err } @@ -609,13 +609,13 @@ func GetPods(c clientset.Interface, ns string, matchLabels map[string]string) ([ } // GetPodSecretUpdateTimeout returns the timeout duration for updating pod secret. -func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration { +func GetPodSecretUpdateTimeout(ctx context.Context, c clientset.Interface) time.Duration { // With SecretManager(ConfigMapManager), we may have to wait up to full sync period + // TTL of secret(configmap) to elapse before the Kubelet projects the update into the // volume and the container picks it up. // So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for // secret(configmap) that's based on cluster size + additional time as a fudge factor. 
- secretTTL, err := getNodeTTLAnnotationValue(c) + secretTTL, err := getNodeTTLAnnotationValue(ctx, c) if err != nil { framework.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err) } @@ -624,16 +624,16 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration { } // VerifyPodHasConditionWithType verifies the pod has the expected condition by type -func VerifyPodHasConditionWithType(f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) +func VerifyPodHasConditionWithType(ctx context.Context, f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) { + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name) if condition := FindPodConditionByType(&pod.Status, cType); condition == nil { framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, cType, pod.Status) } } -func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) { - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) { + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil || len(nodes.Items) == 0 { return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err) } diff --git a/test/e2e/framework/pod/resource_test.go b/test/e2e/framework/pod/resource_test.go index 639b7c96d4e..c78f5486366 100644 --- a/test/e2e/framework/pod/resource_test.go +++ b/test/e2e/framework/pod/resource_test.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "reflect" "testing" @@ -52,7 +53,7 @@ func TestGetPodsInNamespace(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cs := fakeclient.NewSimpleClientset(tt.pods...) - got, err := GetPodsInNamespace(cs, "", map[string]string{}) + got, err := GetPodsInNamespace(context.Background(), cs, "", map[string]string{}) if (err != nil) != tt.expectErr { t.Errorf("expectErr = %v, but got err = %v", tt.expectErr, err) } diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go index 24ba76805df..3473cabed1b 100644 --- a/test/e2e/framework/pod/wait.go +++ b/test/e2e/framework/pod/wait.go @@ -180,7 +180,7 @@ func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState stri // // If minPods or allowedNotReadyPods are -1, this method returns immediately // without waiting. 
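Plain "go test" callers such as resource_test.go above have no Ginkgo spec context, so they simply pass context.Background() or a bounded context of their own. A sketch of that calling convention (listNames is a placeholder, not a helper from this package):

package pod

import (
	"context"
	"testing"
	"time"
)

// listNames stands in for any of the ctx-taking helpers in this package.
func listNames(ctx context.Context) ([]string, error) {
	// Real helpers would issue API calls with ctx here.
	return []string{"pod-a"}, ctx.Err()
}

func TestListNames(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	got, err := listNames(ctx)
	if err != nil || len(got) != 1 {
		t.Fatalf("unexpected result: %v, %v", got, err)
	}
}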
-func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error { +func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error { if minPods == -1 || allowedNotReadyPods == -1 { return nil } @@ -195,7 +195,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN notReady := int32(0) var lastAPIError error - if wait.PollImmediate(poll, timeout, func() (bool, error) { + if wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { // We get the new list of pods, replication controllers, and // replica sets in every iteration because more pods come // online during startup and we want to ensure they are also @@ -204,7 +204,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN // Clear API error from the last attempt in case the following calls succeed. lastAPIError = nil - rcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{}) + rcList, err := c.CoreV1().ReplicationControllers(ns).List(ctx, metav1.ListOptions{}) lastAPIError = err if err != nil { return handleWaitingAPIError(err, false, "listing replication controllers in namespace %s", ns) @@ -214,7 +214,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN replicaOk += rc.Status.ReadyReplicas } - rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{}) + rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{}) lastAPIError = err if err != nil { return handleWaitingAPIError(err, false, "listing replication sets in namespace %s", ns) @@ -224,7 +224,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN replicaOk += rs.Status.ReadyReplicas } - podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) lastAPIError = err if err != nil { return handleWaitingAPIError(err, false, "listing pods in namespace %s", ns) @@ -280,15 +280,15 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN // WaitForPodCondition waits a pods to be matched to the given condition. // If the condition callback returns an error that matches FinalErr (checked with IsFinal), // then polling aborts early. 
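The WaitForPodsRunningReady hunk above shows the conversion applied throughout this file: wait.PollImmediate becomes wait.PollImmediateWithContext, the condition gains a ctx parameter, and the API calls inside it use that ctx instead of context.TODO(). A compile-only sketch of the pattern, assuming k8s.io/apimachinery and client-go on the module path (waitForPodRunning is illustrative, not one of the framework helpers, and the real code adds richer logging and error handling):

package e2esketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

func waitForPodRunning(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error {
	// Before: wait.PollImmediate(poll, timeout, func() (bool, error) { ... context.TODO() ... })
	// After: both the poll loop and every API call observe the caller's ctx.
	return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // keep retrying on transient errors (simplified)
		}
		return pod.Status.Phase == v1.PodRunning, nil
	})
}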
-func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error { +func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error { framework.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc) var ( lastPodError error lastPod *v1.Pod start = time.Now() ) - err := wait.PollImmediate(poll, timeout, func() (bool, error) { - pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { + pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) lastPodError = err if err != nil { return handleWaitingAPIError(err, true, "getting pod %s", podIdentifier(ns, podName)) @@ -329,12 +329,12 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin // WaitForAllPodsCondition waits for the listed pods to match the given condition. // To succeed, at least minPods must be listed, and all listed pods must match the condition. -func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) { +func WaitForAllPodsCondition(ctx context.Context, c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) { framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc) var pods *v1.PodList matched := 0 - err := wait.PollImmediate(poll, timeout, func() (done bool, err error) { - pods, err = c.CoreV1().Pods(ns).List(context.TODO(), opts) + err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (done bool, err error) { + pods, err = c.CoreV1().Pods(ns).List(ctx, opts) if err != nil { return handleWaitingAPIError(err, true, "listing pods") } @@ -440,8 +440,8 @@ func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, t // terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully // terminated (reason==""), but may be called to detect if a pod did *not* terminate according to // the supplied reason. -func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error { - return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("terminated with reason %s", reason), podStartTimeout, func(pod *v1.Pod) (bool, error) { +func WaitForPodTerminatedInNamespace(ctx context.Context, c clientset.Interface, podName, reason, namespace string) error { + return WaitForPodCondition(ctx, c, namespace, podName, fmt.Sprintf("terminated with reason %s", reason), podStartTimeout, func(pod *v1.Pod) (bool, error) { // Only consider Failed pods. Successful pods will be deleted and detected in // waitForPodCondition's Get call returning `IsNotFound` if pod.Status.Phase == v1.PodFailed { @@ -455,8 +455,8 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam } // WaitForPodTerminatingInNamespaceTimeout returns if the pod is terminating, or an error if it is not after the timeout. 
-func WaitForPodTerminatingInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error { - return WaitForPodCondition(c, namespace, podName, "is terminating", timeout, func(pod *v1.Pod) (bool, error) { +func WaitForPodTerminatingInNamespaceTimeout(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return WaitForPodCondition(ctx, c, namespace, podName, "is terminating", timeout, func(pod *v1.Pod) (bool, error) { if pod.DeletionTimestamp != nil { return true, nil } @@ -465,8 +465,8 @@ func WaitForPodTerminatingInNamespaceTimeout(c clientset.Interface, podName, nam } // WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long. -func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error { - return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) { +func WaitForPodSuccessInNamespaceTimeout(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return WaitForPodCondition(ctx, c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) { if pod.Spec.RestartPolicy == v1.RestartPolicyAlways { return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName) } @@ -486,8 +486,8 @@ func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespa // and have condition Status equal to Unschedulable, // if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason. // Typically called to test that the passed-in pod is Pending and Unschedulable. -func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error { - return WaitForPodCondition(c, namespace, podName, v1.PodReasonUnschedulable, podStartTimeout, func(pod *v1.Pod) (bool, error) { +func WaitForPodNameUnschedulableInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string) error { + return WaitForPodCondition(ctx, c, namespace, podName, v1.PodReasonUnschedulable, podStartTimeout, func(pod *v1.Pod) (bool, error) { // Only consider Failed pods. Successful pods will be deleted and detected in // waitForPodCondition's Get call returning `IsNotFound` if pod.Status.Phase == v1.PodPending { @@ -506,20 +506,20 @@ func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, name // WaitForPodNameRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. -func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error { - return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, podStartTimeout) +func WaitForPodNameRunningInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string) error { + return WaitTimeoutForPodRunningInNamespace(ctx, c, podName, namespace, podStartTimeout) } // WaitForPodRunningInNamespaceSlow waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. 
Returns an error if timeout occurs first, or pod goes in to failed state. -func WaitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error { - return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout) +func WaitForPodRunningInNamespaceSlow(ctx context.Context, c clientset.Interface, podName, namespace string) error { + return WaitTimeoutForPodRunningInNamespace(ctx, c, podName, namespace, slowPodStartTimeout) } // WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running. -func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { - return WaitForPodCondition(c, namespace, podName, "running", timeout, func(pod *v1.Pod) (bool, error) { +func WaitTimeoutForPodRunningInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return WaitForPodCondition(ctx, c, namespace, podName, "running", timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodRunning: return true, nil @@ -534,16 +534,16 @@ func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespa // WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. -func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error { +func WaitForPodRunningInNamespace(ctx context.Context, c clientset.Interface, pod *v1.Pod) error { if pod.Status.Phase == v1.PodRunning { return nil } - return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, podStartTimeout) + return WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, podStartTimeout) } // WaitTimeoutForPodNoLongerRunningInNamespace waits the given timeout duration for the specified pod to stop. -func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { - return WaitForPodCondition(c, namespace, podName, "completed", timeout, func(pod *v1.Pod) (bool, error) { +func WaitTimeoutForPodNoLongerRunningInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return WaitForPodCondition(ctx, c, namespace, podName, "completed", timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed, v1.PodSucceeded: return true, nil @@ -554,14 +554,14 @@ func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, // WaitForPodNoLongerRunningInNamespace waits default amount of time (defaultPodDeletionTimeout) for the specified pod to stop running. // Returns an error if timeout occurs first. -func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error { - return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, defaultPodDeletionTimeout) +func WaitForPodNoLongerRunningInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string) error { + return WaitTimeoutForPodNoLongerRunningInNamespace(ctx, c, podName, namespace, defaultPodDeletionTimeout) } // WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the // specified pod to be ready and running. 
-func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { - return WaitForPodCondition(c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) { +func WaitTimeoutForPodReadyInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return WaitForPodCondition(ctx, c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed: framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status) @@ -581,8 +581,8 @@ func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace // WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state. // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. -func WaitForPodNotPending(c clientset.Interface, ns, podName string) error { - return WaitForPodCondition(c, ns, podName, "not pending", podStartTimeout, func(pod *v1.Pod) (bool, error) { +func WaitForPodNotPending(ctx context.Context, c clientset.Interface, ns, podName string) error { + return WaitForPodCondition(ctx, c, ns, podName, "not pending", podStartTimeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodPending: return false, nil @@ -593,23 +593,23 @@ func WaitForPodNotPending(c clientset.Interface, ns, podName string) error { } // WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout. -func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error { - return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout) +func WaitForPodSuccessInNamespace(ctx context.Context, c clientset.Interface, podName string, namespace string) error { + return WaitForPodSuccessInNamespaceTimeout(ctx, c, podName, namespace, podStartTimeout) } // WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout. -func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error { - return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout) +func WaitForPodSuccessInNamespaceSlow(ctx context.Context, c clientset.Interface, podName string, namespace string) error { + return WaitForPodSuccessInNamespaceTimeout(ctx, c, podName, namespace, slowPodStartTimeout) } // WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate. // Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get // api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other // than "not found" then that error is returned and the wait stops. 
-func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error { +func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error { var lastPod *v1.Pod - err := wait.PollImmediate(poll, timeout, func() (bool, error) { - pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { + pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil // done } @@ -631,12 +631,12 @@ func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, ti } // WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear. -func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { +func WaitForPodToDisappear(ctx context.Context, c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { var lastPod *v1.Pod - err := wait.PollImmediate(interval, timeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { framework.Logf("Waiting for pod %s to disappear", podName) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) + pods, err := c.CoreV1().Pods(ns).List(ctx, options) if err != nil { return handleWaitingAPIError(err, true, "listing pods") } @@ -667,20 +667,20 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe } // PodsResponding waits for the pods to response. -func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error { +func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error { ginkgo.By("trying to dial each unique pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err := wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) + err := wait.PollImmediateWithContext(ctx, poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) return maybeTimeoutError(err, "waiting for pods to be responsive") } // WaitForNumberOfPods waits up to timeout to ensure there are exact // `num` pods in namespace `ns`. // It returns the matching Pods or a timeout error. -func WaitForNumberOfPods(c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) { +func WaitForNumberOfPods(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) { actualNum := 0 - err = wait.PollImmediate(poll, timeout, func() (bool, error) { - pods, err = c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + err = wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { + pods, err = c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { return handleWaitingAPIError(err, false, "listing pods") } @@ -692,9 +692,9 @@ func WaitForNumberOfPods(c clientset.Interface, ns string, num int, timeout time // WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one // matching pod exists. 
Return the list of matching pods. -func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { +func WaitForPodsWithLabelScheduled(ctx context.Context, c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { opts := metav1.ListOptions{LabelSelector: label.String()} - return WaitForAllPodsCondition(c, ns, opts, 1, "scheduled", podScheduledBeforeTimeout, func(pod *v1.Pod) (bool, error) { + return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "scheduled", podScheduledBeforeTimeout, func(pod *v1.Pod) (bool, error) { if pod.Spec.NodeName == "" { return false, nil } @@ -703,26 +703,26 @@ func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label label } // WaitForPodsWithLabel waits up to podListTimeout for getting pods with certain label -func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (*v1.PodList, error) { +func WaitForPodsWithLabel(ctx context.Context, c clientset.Interface, ns string, label labels.Selector) (*v1.PodList, error) { opts := metav1.ListOptions{LabelSelector: label.String()} - return WaitForAllPodsCondition(c, ns, opts, 1, "existent", podListTimeout, func(pod *v1.Pod) (bool, error) { + return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "existent", podListTimeout, func(pod *v1.Pod) (bool, error) { return true, nil }) } // WaitForPodsWithLabelRunningReady waits for exact amount of matching pods to become running and ready. // Return the list of matching pods. -func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) { +func WaitForPodsWithLabelRunningReady(ctx context.Context, c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) { opts := metav1.ListOptions{LabelSelector: label.String()} - return WaitForAllPodsCondition(c, ns, opts, 1, "running and ready", timeout, testutils.PodRunningReady) + return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "running and ready", timeout, testutils.PodRunningReady) } // WaitForNRestartablePods tries to list restarting pods using ps until it finds expect of them, // returning their names if it can do so before timeout. -func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) { +func WaitForNRestartablePods(ctx context.Context, ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) { var pods []*v1.Pod var errLast error - found := wait.Poll(poll, timeout, func() (bool, error) { + found := wait.PollWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { allPods := ps.List() pods = FilterNonRestartablePods(allPods) if len(pods) != expect { @@ -746,9 +746,9 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du // WaitForPodContainerToFail waits for the given Pod container to fail with the given reason, specifically due to // invalid container configuration. In this case, the container will remain in a waiting state with a specific // reason set, which should match the given reason. 
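A side effect of the new signatures, visible in the PodsResponding hunk above, is that a method like CheckAllResponses now has exactly the shape of wait.ConditionWithContextFunc, so its method value can be handed to the poll helpers directly. A toy sketch, not a real framework type:

package e2esketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

type checker struct{ remaining int }

// CheckAllResponses matches func(context.Context) (bool, error).
func (c *checker) CheckAllResponses(ctx context.Context) (bool, error) {
	if err := ctx.Err(); err != nil {
		return false, err
	}
	c.remaining--
	return c.remaining <= 0, nil
}

func waitForResponses(ctx context.Context, c *checker) error {
	// The method value satisfies wait.ConditionWithContextFunc, so no wrapper
	// closure is needed.
	return wait.PollImmediateWithContext(ctx, time.Second, time.Minute, c.CheckAllResponses)
}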
-func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string, containerIndex int, reason string, timeout time.Duration) error { +func WaitForPodContainerToFail(ctx context.Context, c clientset.Interface, namespace, podName string, containerIndex int, reason string, timeout time.Duration) error { conditionDesc := fmt.Sprintf("container %d failed with reason %s", containerIndex, reason) - return WaitForPodCondition(c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) { + return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodPending: if len(pod.Status.ContainerStatuses) == 0 { @@ -767,9 +767,9 @@ func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string, } // WaitForPodContainerStarted waits for the given Pod container to start, after a successful run of the startupProbe. -func WaitForPodContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error { +func WaitForPodContainerStarted(ctx context.Context, c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error { conditionDesc := fmt.Sprintf("container %d started", containerIndex) - return WaitForPodCondition(c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) { + return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) { if containerIndex > len(pod.Status.ContainerStatuses)-1 { return false, nil } @@ -779,9 +779,9 @@ func WaitForPodContainerStarted(c clientset.Interface, namespace, podName string } // WaitForPodFailedReason waits for the pod to fail with the given reason in its status, for example "SysctlForbidden".
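On the caller side, Ginkgo v2 hands a per-spec context to any test body declared with a context.Context parameter, and that is the ctx these helpers now expect; the wait_test.go hunk below exercises exactly this. A hedged sketch of a spec using the migrated helpers, assuming the import aliases used elsewhere in this patch (the pod and namespace names are invented):

package e2esketch

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

var _ = ginkgo.Describe("context threading", func() {
	f := framework.NewDefaultFramework("ctx-demo")

	ginkgo.It("waits with the spec context", func(ctx context.Context) {
		// Aborting the spec cancels ctx, which unwinds the poll loop and the
		// in-flight Get immediately instead of waiting out the timeout.
		err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, "demo-pod", f.Namespace.Name, 2*time.Minute)
		framework.ExpectNoError(err)
	})
})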
-func WaitForPodFailedReason(c clientset.Interface, pod *v1.Pod, reason string, timeout time.Duration) error { +func WaitForPodFailedReason(ctx context.Context, c clientset.Interface, pod *v1.Pod, reason string, timeout time.Duration) error { conditionDesc := fmt.Sprintf("failed with reason %s", reason) - return WaitForPodCondition(c, pod.Namespace, pod.Name, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) { + return WaitForPodCondition(ctx, c, pod.Namespace, pod.Name, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodSucceeded: return true, errors.New("pod succeeded unexpectedly") @@ -797,9 +797,9 @@ func WaitForPodFailedReason(c clientset.Interface, pod *v1.Pod, reason string, t } // WaitForContainerRunning waits for the given Pod container to have a state of running -func WaitForContainerRunning(c clientset.Interface, namespace, podName, containerName string, timeout time.Duration) error { +func WaitForContainerRunning(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, timeout time.Duration) error { conditionDesc := fmt.Sprintf("container %s running", containerName) - return WaitForPodCondition(c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) { + return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) { for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} { for _, cs := range statuses { if cs.Name == containerName { diff --git a/test/e2e/framework/pod/wait_test.go b/test/e2e/framework/pod/wait_test.go index 22cd8c5bb73..24322e97e4b 100644 --- a/test/e2e/framework/pod/wait_test.go +++ b/test/e2e/framework/pod/wait_test.go @@ -53,11 +53,11 @@ import ( var _ = ginkgo.Describe("pod", func() { ginkgo.It("not found", func(ctx context.Context) { - framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(clientSet, "no-such-pod", "default", timeout /* no explanation here to cover that code path */)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, "no-such-pod", "default", timeout /* no explanation here to cover that code path */)) }) ginkgo.It("not running", func(ctx context.Context) { - framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(clientSet, podName, podNamespace, timeout), "wait for pod %s running", podName /* tests printf formatting */) + framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, podName, podNamespace, timeout), "wait for pod %s running", podName /* tests printf formatting */) }) }) diff --git a/test/e2e/framework/provider.go b/test/e2e/framework/provider.go index cd98219dfdf..6a463f719a4 100644 --- a/test/e2e/framework/provider.go +++ b/test/e2e/framework/provider.go @@ -17,6 +17,7 @@ limitations under the License. 
package framework import ( + "context" "fmt" "os" "sync" @@ -100,12 +101,12 @@ type ProviderInterface interface { CreateShare() (string, string, string, error) DeleteShare(accountName, shareName string) error - CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) - DeletePVSource(pvSource *v1.PersistentVolumeSource) error + CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) + DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error - CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) + CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) - EnsureLoadBalancerResourcesDeleted(ip, portRange string) error + EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error LoadBalancerSrcRanges() []string EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) } @@ -159,21 +160,21 @@ func (n NullProvider) DeletePD(pdName string) error { } // CreatePVSource is a base implementation which creates PV source. -func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) { +func (n NullProvider) CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) { return nil, fmt.Errorf("Provider not supported") } // DeletePVSource is a base implementation which deletes PV source. -func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error { +func (n NullProvider) DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error { return fmt.Errorf("Provider not supported") } // CleanupServiceResources is a base implementation which cleans up service resources. -func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { +func (n NullProvider) CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) { } // EnsureLoadBalancerResourcesDeleted is a base implementation which ensures load balancer is deleted. -func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { +func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error { return nil } diff --git a/test/e2e/framework/providers/aws/aws.go b/test/e2e/framework/providers/aws/aws.go index a13807d414e..3fe12a9f04c 100644 --- a/test/e2e/framework/providers/aws/aws.go +++ b/test/e2e/framework/providers/aws/aws.go @@ -17,6 +17,7 @@ limitations under the License. 
package aws import ( + "context" "fmt" "strings" @@ -163,7 +164,7 @@ func (p *Provider) DeletePD(pdName string) error { } // CreatePVSource creates a persistent volume source -func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) { +func (p *Provider) CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) { return &v1.PersistentVolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ VolumeID: diskName, @@ -173,8 +174,8 @@ func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSo } // DeletePVSource deletes a persistent volume source -func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error { - return e2epv.DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID) +func (p *Provider) DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error { + return e2epv.DeletePDWithRetry(ctx, pvSource.AWSElasticBlockStore.VolumeID) } func newAWSClient(zone string) *ec2.EC2 { diff --git a/test/e2e/framework/providers/gce/firewall.go b/test/e2e/framework/providers/gce/firewall.go index 76478598f9e..f8850b92b5a 100644 --- a/test/e2e/framework/providers/gce/firewall.go +++ b/test/e2e/framework/providers/gce/firewall.go @@ -17,6 +17,7 @@ limitations under the License. package gce import ( + "context" "fmt" "net/http" "strconv" @@ -394,12 +395,12 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset } // WaitForFirewallRule waits for the specified firewall existence -func WaitForFirewallRule(gceCloud *gcecloud.Cloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) { +func WaitForFirewallRule(ctx context.Context, gceCloud *gcecloud.Cloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) { framework.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist) var fw *compute.Firewall var err error - condition := func() (bool, error) { + condition := func(ctx context.Context) (bool, error) { fw, err = gceCloud.GetFirewall(fwName) if err != nil && exist || err == nil && !exist || @@ -409,7 +410,7 @@ func WaitForFirewallRule(gceCloud *gcecloud.Cloud, fwName string, exist bool, ti return true, nil } - if err := wait.PollImmediate(5*time.Second, timeout, condition); err != nil { + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, timeout, condition); err != nil { return nil, fmt.Errorf("error waiting for firewall %v exist=%v", fwName, exist) } return fw, nil diff --git a/test/e2e/framework/providers/gce/gce.go b/test/e2e/framework/providers/gce/gce.go index 4b62ee1bd57..4d69bbb8b04 100644 --- a/test/e2e/framework/providers/gce/gce.go +++ b/test/e2e/framework/providers/gce/gce.go @@ -185,14 +185,14 @@ func (p *Provider) GroupSize(group string) (int, error) { } // EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created -func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { +func (p *Provider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error { project := framework.TestContext.CloudConfig.ProjectID region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone) if err != nil { return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err) } - return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { + return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx 
context.Context) (bool, error) { computeservice := p.gceCloud.ComputeServices().GA list, err := computeservice.ForwardingRules.List(project, region).Do() if err != nil { @@ -268,7 +268,7 @@ func (p *Provider) DeletePD(pdName string) error { } // CreatePVSource creates a persistent volume source -func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) { +func (p *Provider) CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) { return &v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: diskName, @@ -279,16 +279,16 @@ func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSo } // DeletePVSource deletes a persistent volume source -func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error { - return e2epv.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName) +func (p *Provider) DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error { + return e2epv.DeletePDWithRetry(ctx, pvSource.GCEPersistentDisk.PDName) } // CleanupServiceResources cleans up GCE Service Type=LoadBalancer resources with // the given name. The name is usually the UUID of the Service prefixed with an // alpha-numeric character ('a') to work around cloudprovider rules. -func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { - if pollErr := wait.Poll(5*time.Second, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) { - if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil { +func (p *Provider) CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) { + if pollErr := wait.PollWithContext(ctx, 5*time.Second, e2eservice.LoadBalancerCleanupTimeout, func(ctx context.Context) (bool, error) { + if err := p.cleanupGCEResources(ctx, c, loadBalancerName, region, zone); err != nil { framework.Logf("Still waiting for glbc to cleanup: %v", err) return false, nil } @@ -298,7 +298,7 @@ func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerNa } } -func (p *Provider) cleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) { +func (p *Provider) cleanupGCEResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) (retErr error) { if region == "" { // Attempt to parse region from zone if no region is given. 
var err error @@ -320,7 +320,7 @@ func (p *Provider) cleanupGCEResources(c clientset.Interface, loadBalancerName, !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { retErr = fmt.Errorf("%v\n%v", retErr, err) } - clusterID, err := GetClusterID(c) + clusterID, err := GetClusterID(ctx, c) if err != nil { retErr = fmt.Errorf("%v\n%v", retErr, err) return @@ -401,8 +401,8 @@ func GetGCECloud() (*gcecloud.Cloud, error) { } // GetClusterID returns cluster ID -func GetClusterID(c clientset.Interface) (string, error) { - cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), gcecloud.UIDConfigMapName, metav1.GetOptions{}) +func GetClusterID(ctx context.Context, c clientset.Interface) (string, error) { + cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, gcecloud.UIDConfigMapName, metav1.GetOptions{}) if err != nil || cm == nil { return "", fmt.Errorf("error getting cluster ID: %v", err) } diff --git a/test/e2e/framework/providers/gce/ingress.go b/test/e2e/framework/providers/gce/ingress.go index fa8d15df553..b3e92928cbd 100644 --- a/test/e2e/framework/providers/gce/ingress.go +++ b/test/e2e/framework/providers/gce/ingress.go @@ -77,14 +77,14 @@ type IngressController struct { } // CleanupIngressController calls cont.CleanupIngressControllerWithTimeout with hard-coded timeout -func (cont *IngressController) CleanupIngressController() error { - return cont.CleanupIngressControllerWithTimeout(e2eservice.LoadBalancerCleanupTimeout) +func (cont *IngressController) CleanupIngressController(ctx context.Context) error { + return cont.CleanupIngressControllerWithTimeout(ctx, e2eservice.LoadBalancerCleanupTimeout) } // CleanupIngressControllerWithTimeout calls the IngressController.Cleanup(false) // followed with deleting the static ip, and then a final IngressController.Cleanup(true) -func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.Duration) error { - pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) { +func (cont *IngressController) CleanupIngressControllerWithTimeout(ctx context.Context, timeout time.Duration) error { + pollErr := wait.PollWithContext(ctx, 5*time.Second, timeout, func(ctx context.Context) (bool, error) { if err := cont.Cleanup(false); err != nil { framework.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err) return false, nil @@ -105,7 +105,7 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time. // controller. Delete this IP only after the controller has had a chance // to cleanup or it might interfere with the controller, causing it to // throw out confusing events. - if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { + if ipErr := wait.PollWithContext(ctx, 5*time.Second, 1*time.Minute, func(ctx context.Context) (bool, error) { if err := cont.deleteStaticIPs(); err != nil { framework.Logf("Failed to delete static-ip: %v\n", err) return false, nil @@ -125,9 +125,9 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time. 
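Because ProviderInterface itself changed earlier in this patch, every provider implementation has to adopt the context-aware signatures, as NullProvider does above. A skeleton of an out-of-tree provider picking up the same shape (only the methods touched by this patch are shown, so the type does not fully satisfy the interface):

package e2esketch

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

type fakeProvider struct{}

func (fakeProvider) CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) {
	return nil, fmt.Errorf("not implemented")
}

func (fakeProvider) DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error {
	return nil
}

func (fakeProvider) CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) {
}

func (fakeProvider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error {
	return nil
}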
return nil } -func (cont *IngressController) getL7AddonUID() (string, error) { +func (cont *IngressController) getL7AddonUID(ctx context.Context) (string, error) { framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap) - cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), uidConfigMap, metav1.GetOptions{}) + cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, uidConfigMap, metav1.GetOptions{}) if err != nil { return "", err } @@ -604,8 +604,8 @@ func (cont *IngressController) isHTTPErrorCode(err error, code int) bool { } // WaitForNegBackendService waits for the expected backend service to become -func (cont *IngressController) WaitForNegBackendService(svcPorts map[string]v1.ServicePort) error { - return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { +func (cont *IngressController) WaitForNegBackendService(ctx context.Context, svcPorts map[string]v1.ServicePort) error { + return wait.PollWithContext(ctx, 5*time.Second, 1*time.Minute, func(ctx context.Context) (bool, error) { err := cont.verifyBackendMode(svcPorts, negBackend) if err != nil { framework.Logf("Err while checking if backend service is using NEG: %v", err) @@ -616,8 +616,8 @@ func (cont *IngressController) WaitForNegBackendService(svcPorts map[string]v1.S } // WaitForIgBackendService returns true only if all global backend service with matching svcPorts pointing to IG as backend -func (cont *IngressController) WaitForIgBackendService(svcPorts map[string]v1.ServicePort) error { - return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { +func (cont *IngressController) WaitForIgBackendService(ctx context.Context, svcPorts map[string]v1.ServicePort) error { + return wait.PollWithContext(ctx, 5*time.Second, 1*time.Minute, func(ctx context.Context) (bool, error) { err := cont.verifyBackendMode(svcPorts, igBackend) if err != nil { framework.Logf("Err while checking if backend service is using IG: %v", err) @@ -745,8 +745,8 @@ func (cont *IngressController) Cleanup(del bool) error { } // Init initializes the IngressController with an UID -func (cont *IngressController) Init() error { - uid, err := cont.getL7AddonUID() +func (cont *IngressController) Init(ctx context.Context) error { + uid, err := cont.getL7AddonUID(ctx) if err != nil { return err } diff --git a/test/e2e/framework/providers/gce/util.go b/test/e2e/framework/providers/gce/util.go index 88dcaab4945..9b31a4cbf9d 100644 --- a/test/e2e/framework/providers/gce/util.go +++ b/test/e2e/framework/providers/gce/util.go @@ -80,12 +80,12 @@ func RecreateNodes(c clientset.Interface, nodes []v1.Node) error { } // WaitForNodeBootIdsToChange waits for the boot ids of the given nodes to change in order to verify the node has been recreated. -func WaitForNodeBootIdsToChange(c clientset.Interface, nodes []v1.Node, timeout time.Duration) error { +func WaitForNodeBootIdsToChange(ctx context.Context, c clientset.Interface, nodes []v1.Node, timeout time.Duration) error { errMsg := []string{} for i := range nodes { node := &nodes[i] - if err := wait.Poll(30*time.Second, timeout, func() (bool, error) { - newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + if err := wait.PollWithContext(ctx, 30*time.Second, timeout, func(ctx context.Context) (bool, error) { + newNode, err := c.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Could not get node info: %s. 
Retrying in %v.", err, 30*time.Second) return false, nil diff --git a/test/e2e/framework/providers/gcp.go b/test/e2e/framework/providers/gcp.go index 86e843d5be0..80cce680360 100644 --- a/test/e2e/framework/providers/gcp.go +++ b/test/e2e/framework/providers/gcp.go @@ -17,6 +17,7 @@ limitations under the License. package providers import ( + "context" "fmt" "os" "path" @@ -58,7 +59,7 @@ func LocationParamGKE() string { } // MasterUpgradeGKE upgrades master node to the specified version on GKE. -func MasterUpgradeGKE(namespace string, v string) error { +func MasterUpgradeGKE(ctx context.Context, namespace string, v string) error { framework.Logf("Upgrading master to %q", v) args := []string{ "container", @@ -76,7 +77,7 @@ func MasterUpgradeGKE(namespace string, v string) error { return err } - e2enode.WaitForSSHTunnels(namespace) + e2enode.WaitForSSHTunnels(ctx, namespace) return nil } diff --git a/test/e2e/framework/pv/pv.go b/test/e2e/framework/pv/pv.go index fa02940773d..4fbddf91a1d 100644 --- a/test/e2e/framework/pv/pv.go +++ b/test/e2e/framework/pv/pv.go @@ -135,11 +135,11 @@ type PersistentVolumeClaimConfig struct { // PVPVCCleanup cleans up a pv and pvc in a single pv/pvc test case. // Note: delete errors are appended to []error so that we can attempt to delete both the pvc and pv. -func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error { +func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error { var errs []error if pvc != nil { - err := DeletePersistentVolumeClaim(c, pvc.Name, ns) + err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns) if err != nil { errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err)) } @@ -147,7 +147,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc framework.Logf("pvc is nil") } if pv != nil { - err := DeletePersistentVolume(c, pv.Name) + err := DeletePersistentVolume(ctx, c, pv.Name) if err != nil { errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err)) } @@ -160,11 +160,11 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc // PVPVCMapCleanup Cleans up pvs and pvcs in multi-pv-pvc test cases. Entries found in the pv and claim maps are // deleted as long as the Delete api call succeeds. // Note: delete errors are appended to []error so that as many pvcs and pvs as possible are deleted. -func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap) []error { +func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvols PVMap, claims PVCMap) []error { var errs []error for pvcKey := range claims { - err := DeletePersistentVolumeClaim(c, pvcKey.Name, ns) + err := DeletePersistentVolumeClaim(ctx, c, pvcKey.Name, ns) if err != nil { errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvcKey.Name, err)) } else { @@ -173,7 +173,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa } for pvKey := range pvols { - err := DeletePersistentVolume(c, pvKey) + err := DeletePersistentVolume(ctx, c, pvKey) if err != nil { errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pvKey, err)) } else { @@ -184,10 +184,10 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa } // DeletePersistentVolume deletes the PV. 
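Cleanup code needs a context too. Assuming a Ginkgo v2 release whose DeferCleanup passes a context to callbacks that declare one, the PV helpers in this file can be wired so deletion still gets a usable context even when the spec itself was aborted. A sketch only (setupVolume is invented; CreatePV and PVPVCCleanup are the helpers from this diff):

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

func setupVolume(ctx context.Context, f *framework.Framework, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
	pv, err := e2epv.CreatePV(ctx, f.ClientSet, f.Timeouts, pv)
	framework.ExpectNoError(err, "creating PV")
	// Assumption: this DeferCleanup form receives its own context at cleanup time.
	ginkgo.DeferCleanup(func(ctx context.Context) {
		errs := e2epv.PVPVCCleanup(ctx, f.ClientSet, f.Namespace.Name, pv, pvc)
		framework.ExpectNoError(utilerrors.NewAggregate(errs), "cleaning up PV/PVC")
	})
}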
-func DeletePersistentVolume(c clientset.Interface, pvName string) error { +func DeletePersistentVolume(ctx context.Context, c clientset.Interface, pvName string) error { if c != nil && len(pvName) > 0 { framework.Logf("Deleting PersistentVolume %q", pvName) - err := c.CoreV1().PersistentVolumes().Delete(context.TODO(), pvName, metav1.DeleteOptions{}) + err := c.CoreV1().PersistentVolumes().Delete(ctx, pvName, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("PV Delete API error: %v", err) } @@ -196,10 +196,10 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error { } // DeletePersistentVolumeClaim deletes the Claim. -func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error { +func DeletePersistentVolumeClaim(ctx context.Context, c clientset.Interface, pvcName string, ns string) error { if c != nil && len(pvcName) > 0 { framework.Logf("Deleting PersistentVolumeClaim %q", pvcName) - err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{}) + err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("PVC Delete API error: %v", err) } @@ -210,23 +210,23 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin // DeletePVCandValidatePV deletes the PVC and waits for the PV to enter its expected phase. Validate that the PV // has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which // phase value to expect for the pv bound to the to-be-deleted claim. -func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error { +func DeletePVCandValidatePV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error { pvname := pvc.Spec.VolumeName framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname) - err := DeletePersistentVolumeClaim(c, pvc.Name, ns) + err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns) if err != nil { return err } // Wait for the PV's phase to return to be `expectPVPhase` framework.Logf("Waiting for reclaim process to complete.") - err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim) + err = WaitForPersistentVolumePhase(ctx, expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim) if err != nil { return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err) } // examine the pv's ClaimRef and UID and compare to expected values - pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PV Get API error: %v", err) } @@ -254,11 +254,11 @@ func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutCo // Note: if there are more claims than pvs then some of the remaining claims may bind to just made // // available pvs. 
-func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error { +func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error { var boundPVs, deletedPVCs int for pvName := range pvols { - pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PV Get API error: %v", err) } @@ -273,9 +273,9 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey) } // get the pvc for the delete call below - pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, cr.Name, metav1.GetOptions{}) if err == nil { - if err = DeletePVCandValidatePV(c, timeouts, ns, pvc, pv, expectPVPhase); err != nil { + if err = DeletePVCandValidatePV(ctx, c, timeouts, ns, pvc, pv, expectPVPhase); err != nil { return err } } else if !apierrors.IsNotFound(err) { @@ -294,11 +294,11 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time } // create the PV resource. Fails test on error. -func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { +func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { var resultPV *v1.PersistentVolume var lastCreateErr error - err := wait.PollImmediate(29*time.Second, timeouts.PVCreate, func() (done bool, err error) { - resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) + err := wait.PollImmediateWithContext(ctx, 29*time.Second, timeouts.PVCreate, func(ctx context.Context) (done bool, err error) { + resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) if lastCreateErr != nil { // If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP. // If quota failure strings are found for other platforms, they can be added to improve reliability when running @@ -326,13 +326,13 @@ func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1. } // CreatePV creates the PV resource. Fails test on error. -func CreatePV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { - return createPV(c, timeouts, pv) +func CreatePV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { + return createPV(ctx, c, timeouts, pv) } // CreatePVC creates the PVC resource. Fails test on error. 
-func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { - pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) +func CreatePVC(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { + pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("PVC Create API error: %v", err) } @@ -346,7 +346,7 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) // // known until after the PVC is instantiated. This is why the pvc is created // before the pv. -func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { +func CreatePVCPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { // make the pvc spec pvc := MakePersistentVolumeClaim(pvcConfig, ns) preBindMsg := "" @@ -358,7 +358,7 @@ func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo pv := MakePersistentVolume(pvConfig) ginkgo.By(fmt.Sprintf("Creating a PVC followed by a%s PV", preBindMsg)) - pvc, err := CreatePVC(c, ns, pvc) + pvc, err := CreatePVC(ctx, c, ns, pvc) if err != nil { return nil, nil, err } @@ -367,7 +367,7 @@ func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo if preBind { pv.Spec.ClaimRef.Name = pvc.Name } - pv, err = createPV(c, timeouts, pv) + pv, err = createPV(ctx, c, timeouts, pv) if err != nil { return nil, pvc, err } @@ -382,7 +382,7 @@ func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo // // known until after the PV is instantiated. This is why the pv is created // before the pvc. -func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { +func CreatePVPVC(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { preBindMsg := "" if preBind { preBindMsg = " pre-bound" @@ -394,7 +394,7 @@ func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo pvc := MakePersistentVolumeClaim(pvcConfig, ns) // instantiate the pv - pv, err := createPV(c, timeouts, pv) + pv, err := createPV(ctx, c, timeouts, pv) if err != nil { return nil, nil, err } @@ -402,7 +402,7 @@ func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo if preBind { pvc.Spec.VolumeName = pv.Name } - pvc, err = CreatePVC(c, ns, pvc) + pvc, err = CreatePVC(ctx, c, ns, pvc) if err != nil { return pv, nil, err } @@ -417,7 +417,7 @@ func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo // Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However, // // orphaned pvs are not deleted and will remain after the suite completes. 
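As an aside, the hunks above all apply the same mechanical conversion: the context-free poll helpers are swapped for their context-aware counterparts from k8s.io/apimachinery/pkg/util/wait, so polling stops as soon as the Ginkgo-supplied context is canceled. A minimal sketch of the pattern, assuming the usual framework imports; waitForPVGone, c and pvName are placeholder names, not part of the patch:

// waitForPVGone polls until the named PV is gone, honoring ctx cancellation.
func waitForPVGone(ctx context.Context, c clientset.Interface, pvName string, timeout time.Duration) error {
	return wait.PollWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
		_, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // done: the PV has been deleted
		}
		// keep polling; an expired timeout or a canceled ctx ends the wait
		return false, nil
	})
}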
-func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) { +func CreatePVsPVCs(ctx context.Context, numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) { pvMap := make(PVMap, numpvs) pvcMap := make(PVCMap, numpvcs) extraPVCs := 0 @@ -430,7 +430,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framewo // create pvs and pvcs for i := 0; i < pvsToCreate; i++ { - pv, pvc, err := CreatePVPVC(c, timeouts, pvConfig, pvcConfig, ns, false) + pv, pvc, err := CreatePVPVC(ctx, c, timeouts, pvConfig, pvcConfig, ns, false) if err != nil { return pvMap, pvcMap, err } @@ -441,7 +441,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framewo // create extra pvs or pvcs as needed for i := 0; i < extraPVs; i++ { pv := MakePersistentVolume(pvConfig) - pv, err := createPV(c, timeouts, pv) + pv, err := createPV(ctx, c, timeouts, pv) if err != nil { return pvMap, pvcMap, err } @@ -449,7 +449,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framewo } for i := 0; i < extraPVCs; i++ { pvc := MakePersistentVolumeClaim(pvcConfig, ns) - pvc, err := CreatePVC(c, ns, pvc) + pvc, err := CreatePVC(ctx, c, ns, pvc) if err != nil { return pvMap, pvcMap, err } @@ -459,27 +459,27 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framewo } // WaitOnPVandPVC waits for the pv and pvc to bind to each other. -func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error { +func WaitOnPVandPVC(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error { // Wait for newly created PVC to bind to the PV framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name) - err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound) + err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound) if err != nil { return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err) } // Wait for PersistentVolume.Status.Phase to be Bound, which it should be // since the PVC is already bound. 
- err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound) + err = WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound) if err != nil { return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err) } // Re-get the pv and pvc objects - pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PV Get API error: %v", err) } - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PVC Get API error: %v", err) } @@ -508,7 +508,7 @@ func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, n // to situations where the maximum wait times are reached several times in succession, // extending test time. Thus, it is recommended to keep the delta between PVs and PVCs // small. -func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error { +func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error { var actualBinds int expectedBinds := len(pvols) if expectedBinds > len(claims) { // want the min of # pvs or #pvcs @@ -516,7 +516,7 @@ func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContex } for pvName := range pvols { - err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound) + err := WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound) if err != nil && len(pvols) > len(claims) { framework.Logf("WARN: pv %v is not bound after max wait", pvName) framework.Logf(" This may be ok since there are more pvs than pvcs") @@ -526,7 +526,7 @@ func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContex return fmt.Errorf("PV %q did not become Bound: %v", pvName, err) } - pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PV Get API error: %v", err) } @@ -539,7 +539,7 @@ func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContex return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey) } - err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound) + err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound) if err != nil { return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err) } @@ -659,10 +659,15 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P } } -func createPDWithRetry(zone string) (string, error) { +func createPDWithRetry(ctx context.Context, zone string) (string, error) { var err error var newDiskName string - for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) { + for start := time.Now(); ; time.Sleep(pdRetryPollTime) { + if time.Since(start) >= pdRetryTimeout || + ctx.Err() != nil { + return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %v", zone, err) + } + 
newDiskName, err = createPD(zone) if err != nil { framework.Logf("Couldn't create a new PD in zone %q, sleeping 5 seconds: %v", zone, err) @@ -671,7 +676,6 @@ func createPDWithRetry(zone string) (string, error) { framework.Logf("Successfully created a new PD in zone %q: %q.", zone, newDiskName) return newDiskName, nil } - return "", err } func CreateShare() (string, string, string, error) { @@ -683,19 +687,23 @@ func DeleteShare(accountName, shareName string) error { } // CreatePDWithRetry creates PD with retry. -func CreatePDWithRetry() (string, error) { - return createPDWithRetry("") +func CreatePDWithRetry(ctx context.Context) (string, error) { + return createPDWithRetry(ctx, "") } // CreatePDWithRetryAndZone creates PD on zone with retry. -func CreatePDWithRetryAndZone(zone string) (string, error) { - return createPDWithRetry(zone) +func CreatePDWithRetryAndZone(ctx context.Context, zone string) (string, error) { + return createPDWithRetry(ctx, zone) } // DeletePDWithRetry deletes PD with retry. -func DeletePDWithRetry(diskName string) error { +func DeletePDWithRetry(ctx context.Context, diskName string) error { var err error - for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) { + for start := time.Now(); ; time.Sleep(pdRetryPollTime) { + if time.Since(start) >= pdRetryTimeout || + ctx.Err() != nil { + return fmt.Errorf("timed out while trying to delete PD %q, last error: %v", diskName, err) + } err = deletePD(diskName) if err != nil { framework.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err) @@ -704,7 +712,6 @@ func DeletePDWithRetry(diskName string) error { framework.Logf("Successfully deleted PD %q.", diskName) return nil } - return fmt.Errorf("unable to delete PD %q: %v", diskName, err) } func createPD(zone string) (string, error) { @@ -719,21 +726,21 @@ func deletePD(pdName string) error { } // WaitForPVClaimBoundPhase waits until all pvcs phase set to bound -func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) { +func WaitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) { persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims)) for index, claim := range pvclaims { - err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, timeout) + err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, timeout) if err != nil { return persistentvolumes, err } // Get new copy of the claim - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) + claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) if err != nil { return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err) } // Get the bounded PV - persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) + persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{}) if err != nil { return persistentvolumes, fmt.Errorf("PV Get API error: %v", err) } @@ -742,10 +749,10 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist } // WaitForPersistentVolumePhase waits for 
a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first. -func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, poll, timeout time.Duration) error { +func WaitForPersistentVolumePhase(ctx context.Context, phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err) continue @@ -760,13 +767,13 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In } // WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first. -func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, poll, timeout time.Duration) error { - return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, poll, timeout, true) +func WaitForPersistentVolumeClaimPhase(ctx context.Context, phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, poll, timeout time.Duration) error { + return WaitForPersistentVolumeClaimsPhase(ctx, phase, c, ns, []string{pvcName}, poll, timeout, true) } // WaitForPersistentVolumeClaimsPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims // to be in a specific phase or until timeout occurs, whichever comes first. -func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, poll, timeout time.Duration, matchAny bool) error { +func WaitForPersistentVolumeClaimsPhase(ctx context.Context, phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, poll, timeout time.Duration, matchAny bool) error { if len(pvcNames) == 0 { return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0") } @@ -774,7 +781,7 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { phaseFoundInAllClaims := true for _, pvcName := range pvcNames { - pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, poll, err) phaseFoundInAllClaims = false @@ -798,22 +805,22 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c } // CreatePVSource creates a PV source. 
-func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) { - diskName, err := CreatePDWithRetryAndZone(zone) +func CreatePVSource(ctx context.Context, zone string) (*v1.PersistentVolumeSource, error) { + diskName, err := CreatePDWithRetryAndZone(ctx, zone) if err != nil { return nil, err } - return framework.TestContext.CloudConfig.Provider.CreatePVSource(zone, diskName) + return framework.TestContext.CloudConfig.Provider.CreatePVSource(ctx, zone, diskName) } // DeletePVSource deletes a PV source. -func DeletePVSource(pvSource *v1.PersistentVolumeSource) error { - return framework.TestContext.CloudConfig.Provider.DeletePVSource(pvSource) +func DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error { + return framework.TestContext.CloudConfig.Provider.DeletePVSource(ctx, pvSource) } // GetDefaultStorageClassName returns default storageClass or return error -func GetDefaultStorageClassName(c clientset.Interface) (string, error) { - list, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{}) +func GetDefaultStorageClassName(ctx context.Context, c clientset.Interface) (string, error) { + list, err := c.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) if err != nil { return "", fmt.Errorf("Error listing storage classes: %v", err) } @@ -834,18 +841,18 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) { } // SkipIfNoDefaultStorageClass skips tests if no default SC can be found. -func SkipIfNoDefaultStorageClass(c clientset.Interface) { - _, err := GetDefaultStorageClassName(c) +func SkipIfNoDefaultStorageClass(ctx context.Context, c clientset.Interface) { + _, err := GetDefaultStorageClassName(ctx, c) if err != nil { e2eskipper.Skipf("error finding default storageClass : %v", err) } } // WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first. -func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, poll, timeout time.Duration) error { +func WaitForPersistentVolumeDeleted(ctx context.Context, c clientset.Interface, pvName string, poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err == nil { framework.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start)) continue diff --git a/test/e2e/framework/rc/rc_utils.go b/test/e2e/framework/rc/rc_utils.go index 732596242ca..baf05979aea 100644 --- a/test/e2e/framework/rc/rc_utils.go +++ b/test/e2e/framework/rc/rc_utils.go @@ -17,6 +17,7 @@ limitations under the License. package rc import ( + "context" "fmt" "github.com/onsi/ginkgo/v2" @@ -70,20 +71,21 @@ func ByNameContainer(name string, replicas int32, labels map[string]string, c v1 } // DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods. 
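A hedged caller-side sketch (not taken from this patch): after the signature change, a spec simply forwards the context that Ginkgo passes into its body, so the GC wait below is abandoned the moment the spec is interrupted. The spec text, RC name, and the e2erc import alias for this package are illustrative assumptions:

ginkgo.It("should delete the RC and wait for its pods to be garbage collected", func(ctx context.Context) {
	// ctx is canceled by Ginkgo when the spec is aborted or times out.
	err := e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, "example-rc")
	framework.ExpectNoError(err)
})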
-func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error { - return e2eresource.DeleteResourceAndWaitForGC(c, schema.GroupKind{Kind: "ReplicationController"}, ns, name) +func DeleteRCAndWaitForGC(ctx context.Context, c clientset.Interface, ns, name string) error { + // TODO (pohly): context support + return e2eresource.DeleteResourceAndWaitForGC(ctx, c, schema.GroupKind{Kind: "ReplicationController"}, ns, name) } // ScaleRC scales Replication Controller to be desired size. -func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { - return e2eresource.ScaleResource(clientset, scalesGetter, ns, name, size, wait, schema.GroupKind{Kind: "ReplicationController"}, v1.SchemeGroupVersion.WithResource("replicationcontrollers")) +func ScaleRC(ctx context.Context, clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { + return e2eresource.ScaleResource(ctx, clientset, scalesGetter, ns, name, size, wait, schema.GroupKind{Kind: "ReplicationController"}, v1.SchemeGroupVersion.WithResource("replicationcontrollers")) } // RunRC Launches (and verifies correctness) of a Replication Controller // and will wait for all pods it spawns to become "Running". -func RunRC(config testutils.RCConfig) error { +func RunRC(ctx context.Context, config testutils.RCConfig) error { ginkgo.By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace)) config.NodeDumpFunc = e2edebug.DumpNodeDebugInfo config.ContainerDumpFunc = e2ekubectl.LogFailedContainers - return testutils.RunRC(config) + return testutils.RunRC(ctx, config) } diff --git a/test/e2e/framework/replicaset/wait.go b/test/e2e/framework/replicaset/wait.go index a8a9b2aadf0..6cbb879ad0c 100644 --- a/test/e2e/framework/replicaset/wait.go +++ b/test/e2e/framework/replicaset/wait.go @@ -29,9 +29,9 @@ import ( ) // WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready. -func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error { - err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) { - rs, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) +func WaitForReadyReplicaSet(ctx context.Context, c clientset.Interface, ns, name string) error { + err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) { + rs, err := c.AppsV1().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -44,16 +44,16 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error { } // WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum -func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error { - return WaitForReplicaSetTargetAvailableReplicasWithTimeout(c, replicaSet, targetReplicaNum, framework.PodStartTimeout) +func WaitForReplicaSetTargetAvailableReplicas(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error { + return WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx, c, replicaSet, targetReplicaNum, framework.PodStartTimeout) } // WaitForReplicaSetTargetAvailableReplicasWithTimeout waits for .status.availableReplicas of a RS to equal targetReplicaNum // with given timeout. 
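A similarly hedged usage sketch for the ReplicaSet helpers above; rs is assumed to be an *appsv1.ReplicaSet created earlier in the spec and e2ereplicaset the conventional import alias:

// Waits until 3 replicas are available, but gives up early if the Ginkgo
// context is canceled because the test was aborted.
framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, f.ClientSet, rs, 3))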
-func WaitForReplicaSetTargetAvailableReplicasWithTimeout(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32, timeout time.Duration) error { +func WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32, timeout time.Duration) error { desiredGeneration := replicaSet.Generation - err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{}) + err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/framework/resource/resources.go b/test/e2e/framework/resource/resources.go index 4464c9d30e6..675899c61ee 100644 --- a/test/e2e/framework/resource/resources.go +++ b/test/e2e/framework/resource/resources.go @@ -46,6 +46,7 @@ const ( // ScaleResource scales the resource to the given size. func ScaleResource( + ctx context.Context, clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, @@ -61,14 +62,14 @@ func ScaleResource( if !wait { return nil } - return WaitForControlledPodsRunning(clientset, ns, name, kind) + return WaitForControlledPodsRunning(ctx, clientset, ns, name, kind) } // DeleteResourceAndWaitForGC deletes only the given resource and waits for GC to delete the pods. -func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error { +func DeleteResourceAndWaitForGC(ctx context.Context, c clientset.Interface, kind schema.GroupKind, ns, name string) error { ginkgo.By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns)) - rtObject, err := GetRuntimeObjectForKind(c, kind, ns, name) + rtObject, err := GetRuntimeObjectForKind(ctx, c, kind, ns, name) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("%v %s not found: %v", kind, name, err) @@ -80,15 +81,15 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns background := metav1.DeletePropagationBackground return testutils.DeleteResource(c, kind, ns, name, metav1.DeleteOptions{PropagationPolicy: &background}) } - return deleteObjectAndWaitForGC(c, rtObject, deleteObject, ns, name, kind.String()) + return deleteObjectAndWaitForGC(ctx, c, rtObject, deleteObject, ns, name, kind.String()) } // DeleteCustomResourceAndWaitForGC deletes only the given resource and waits for GC to delete the pods. // It allows providing a custom resource client, e.g. to fetch a CRD object. 
-func DeleteCustomResourceAndWaitForGC(c clientset.Interface, dynamicClient dynamic.Interface, scaleClient scaleclient.ScalesGetter, gvr schema.GroupVersionResource, ns, name string) error { +func DeleteCustomResourceAndWaitForGC(ctx context.Context, c clientset.Interface, dynamicClient dynamic.Interface, scaleClient scaleclient.ScalesGetter, gvr schema.GroupVersionResource, ns, name string) error { ginkgo.By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", gvr, name, ns)) resourceClient := dynamicClient.Resource(gvr).Namespace(ns) - _, err := resourceClient.Get(context.TODO(), name, metav1.GetOptions{}) + _, err := resourceClient.Get(ctx, name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("%v %s not found: %v", gvr, name, err) @@ -96,19 +97,19 @@ func DeleteCustomResourceAndWaitForGC(c clientset.Interface, dynamicClient dynam } return err } - scaleObj, err := scaleClient.Scales(ns).Get(context.TODO(), gvr.GroupResource(), name, metav1.GetOptions{}) + scaleObj, err := scaleClient.Scales(ns).Get(ctx, gvr.GroupResource(), name, metav1.GetOptions{}) if err != nil { framework.Logf("error while trying to get scale subresource of kind %v with name %v: %v", gvr, name, err) return nil } deleteObject := func() error { background := metav1.DeletePropagationBackground - return resourceClient.Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &background}) + return resourceClient.Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &background}) } - return deleteObjectAndWaitForGC(c, scaleObj, deleteObject, ns, name, gvr.String()) + return deleteObjectAndWaitForGC(ctx, c, scaleObj, deleteObject, ns, name, gvr.String()) } -func deleteObjectAndWaitForGC(c clientset.Interface, rtObject runtime.Object, deleteObject func() error, ns, name, description string) error { +func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObject runtime.Object, deleteObject func() error, ns, name, description string) error { selector, err := GetSelectorFromRuntimeObject(rtObject) if err != nil { return err @@ -154,7 +155,7 @@ func deleteObjectAndWaitForGC(c clientset.Interface, rtObject runtime.Object, de timeout = timeout + 3*time.Minute } - err = waitForPodsInactive(ps, interval, timeout) + err = waitForPodsInactive(ctx, ps, interval, timeout) if err != nil { return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err) } @@ -164,7 +165,7 @@ func deleteObjectAndWaitForGC(c clientset.Interface, rtObject runtime.Object, de // In gce, at any point, small percentage of nodes can disappear for // ~10 minutes due to hostError. 20 minutes should be long enough to // restart VM in that case and delete the pod. - err = waitForPodsGone(ps, interval, 20*time.Minute) + err = waitForPodsGone(ctx, ps, interval, 20*time.Minute) if err != nil { return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) } @@ -172,9 +173,9 @@ func deleteObjectAndWaitForGC(c clientset.Interface, rtObject runtime.Object, de } // waitForPodsGone waits until there are no pods left in the PodStore. 
-func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, timeout time.Duration) error { var pods []*v1.Pod - err := wait.PollImmediate(interval, timeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { if pods = ps.List(); len(pods) == 0 { return true, nil } @@ -194,9 +195,9 @@ func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) er // This is to make a fair comparison of deletion time between DeleteRCAndPods // and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas // when the pod is inactive. -func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error { +func waitForPodsInactive(ctx context.Context, ps *testutils.PodStore, interval, timeout time.Duration) error { var activePods []*v1.Pod - err := wait.PollImmediate(interval, timeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { pods := ps.List() activePods = e2epod.FilterActivePods(pods) if len(activePods) != 0 { @@ -215,8 +216,8 @@ func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration } // WaitForControlledPodsRunning waits up to 10 minutes for pods to become Running. -func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error { - rtObject, err := GetRuntimeObjectForKind(c, kind, ns, name) +func WaitForControlledPodsRunning(ctx context.Context, c clientset.Interface, ns, name string, kind schema.GroupKind) error { + rtObject, err := GetRuntimeObjectForKind(ctx, c, kind, ns, name) if err != nil { return err } @@ -236,8 +237,8 @@ func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind s } // WaitForControlledPods waits up to podListTimeout for getting pods of the specified controller name and returns them. -func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) { - rtObject, err := GetRuntimeObjectForKind(c, kind, ns, name) +func WaitForControlledPods(ctx context.Context, c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) { + rtObject, err := GetRuntimeObjectForKind(ctx, c, kind, ns, name) if err != nil { return nil, err } @@ -245,5 +246,5 @@ func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.G if err != nil { return nil, err } - return e2epod.WaitForPodsWithLabel(c, ns, selector) + return e2epod.WaitForPodsWithLabel(ctx, c, ns, selector) } diff --git a/test/e2e/framework/resource/runtimeobj.go b/test/e2e/framework/resource/runtimeobj.go index d706ce24319..6cc8d221e95 100644 --- a/test/e2e/framework/resource/runtimeobj.go +++ b/test/e2e/framework/resource/runtimeobj.go @@ -44,18 +44,18 @@ var ( // GetRuntimeObjectForKind returns a runtime.Object based on its GroupKind, // namespace and name. 
-func GetRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) { +func GetRuntimeObjectForKind(ctx context.Context, c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) { switch kind { case kindReplicationController: - return c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{}) + return c.CoreV1().ReplicationControllers(ns).Get(ctx, name, metav1.GetOptions{}) case kindExtensionsReplicaSet, kindAppsReplicaSet: - return c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + return c.AppsV1().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{}) case kindExtensionsDeployment, kindAppsDeployment: - return c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{}) + return c.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{}) case kindExtensionsDaemonSet: - return c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + return c.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{}) case kindBatchJob: - return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) + return c.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{}) default: return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind) } diff --git a/test/e2e/framework/security/apparmor.go b/test/e2e/framework/security/apparmor.go index 7f400a045af..fcd46164ef5 100644 --- a/test/e2e/framework/security/apparmor.go +++ b/test/e2e/framework/security/apparmor.go @@ -39,15 +39,15 @@ const ( ) // LoadAppArmorProfiles creates apparmor-profiles ConfigMap and apparmor-loader ReplicationController. -func LoadAppArmorProfiles(nsName string, clientset clientset.Interface) { - createAppArmorProfileCM(nsName, clientset) - createAppArmorProfileLoader(nsName, clientset) +func LoadAppArmorProfiles(ctx context.Context, nsName string, clientset clientset.Interface) { + createAppArmorProfileCM(ctx, nsName, clientset) + createAppArmorProfileLoader(ctx, nsName, clientset) } // CreateAppArmorTestPod creates a pod that tests apparmor profile enforcement. The pod exits with // an error code if the profile is incorrectly enforced. If runOnce is true the pod will exit after // a single test, otherwise it will repeat the test every 1 second until failure. 
-func CreateAppArmorTestPod(nsName string, clientset clientset.Interface, podClient *e2epod.PodClient, unconfined bool, runOnce bool) *v1.Pod { +func CreateAppArmorTestPod(ctx context.Context, nsName string, clientset clientset.Interface, podClient *e2epod.PodClient, unconfined bool, runOnce bool) *v1.Pod { profile := "localhost/" + appArmorProfilePrefix + nsName testCmd := fmt.Sprintf(` if touch %[1]s; then @@ -115,25 +115,25 @@ done`, testCmd) } if runOnce { - pod = podClient.Create(pod) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace( + pod = podClient.Create(ctx, pod) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, clientset, pod.Name, nsName)) var err error - pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) } else { - pod = podClient.CreateSync(pod) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(clientset, pod.Name, nsName, framework.PodStartTimeout)) + pod = podClient.CreateSync(ctx, pod) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, clientset, pod.Name, nsName, framework.PodStartTimeout)) } // Verify Pod affinity colocated the Pods. - loader := getRunningLoaderPod(nsName, clientset) + loader := getRunningLoaderPod(ctx, nsName, clientset) framework.ExpectEqual(pod.Spec.NodeName, loader.Spec.NodeName) return pod } -func createAppArmorProfileCM(nsName string, clientset clientset.Interface) { +func createAppArmorProfileCM(ctx context.Context, nsName string, clientset clientset.Interface) { profileName := appArmorProfilePrefix + nsName profile := fmt.Sprintf(`#include profile %s flags=(attach_disconnected) { @@ -155,11 +155,11 @@ profile %s flags=(attach_disconnected) { profileName: profile, }, } - _, err := clientset.CoreV1().ConfigMaps(nsName).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err := clientset.CoreV1().ConfigMaps(nsName).Create(ctx, cm, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create apparmor-profiles ConfigMap") } -func createAppArmorProfileLoader(nsName string, clientset clientset.Interface) { +func createAppArmorProfileLoader(ctx context.Context, nsName string, clientset clientset.Interface) { True := true One := int32(1) loader := &v1.ReplicationController{ @@ -223,18 +223,18 @@ func createAppArmorProfileLoader(nsName string, clientset clientset.Interface) { }, }, } - _, err := clientset.CoreV1().ReplicationControllers(nsName).Create(context.TODO(), loader, metav1.CreateOptions{}) + _, err := clientset.CoreV1().ReplicationControllers(nsName).Create(ctx, loader, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create apparmor-loader ReplicationController") // Wait for loader to be ready. 
- getRunningLoaderPod(nsName, clientset) + getRunningLoaderPod(ctx, nsName, clientset) } -func getRunningLoaderPod(nsName string, clientset clientset.Interface) *v1.Pod { +func getRunningLoaderPod(ctx context.Context, nsName string, clientset clientset.Interface) *v1.Pod { label := labels.SelectorFromSet(labels.Set(map[string]string{loaderLabelKey: loaderLabelValue})) - pods, err := e2epod.WaitForPodsWithLabelScheduled(clientset, nsName, label) + pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, clientset, nsName, label) framework.ExpectNoError(err, "Failed to schedule apparmor-loader Pod") pod := &pods.Items[0] - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(clientset, pod), "Failed to run apparmor-loader Pod") + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, clientset, pod), "Failed to run apparmor-loader Pod") return pod } diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go index c49321febe8..bb9777d96eb 100644 --- a/test/e2e/framework/service/jig.go +++ b/test/e2e/framework/service/jig.go @@ -108,12 +108,12 @@ func (j *TestJig) newServiceTemplate(proto v1.Protocol, port int32) *v1.Service // CreateTCPServiceWithPort creates a new TCP Service with given port based on the // j's defaults. Callers can provide a function to tweak the Service object before // it is created. -func (j *TestJig) CreateTCPServiceWithPort(tweak func(svc *v1.Service), port int32) (*v1.Service, error) { +func (j *TestJig) CreateTCPServiceWithPort(ctx context.Context, tweak func(svc *v1.Service), port int32) (*v1.Service, error) { svc := j.newServiceTemplate(v1.ProtocolTCP, port) if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err) } @@ -123,19 +123,19 @@ func (j *TestJig) CreateTCPServiceWithPort(tweak func(svc *v1.Service), port int // CreateTCPService creates a new TCP Service based on the j's // defaults. Callers can provide a function to tweak the Service object before // it is created. -func (j *TestJig) CreateTCPService(tweak func(svc *v1.Service)) (*v1.Service, error) { - return j.CreateTCPServiceWithPort(tweak, 80) +func (j *TestJig) CreateTCPService(ctx context.Context, tweak func(svc *v1.Service)) (*v1.Service, error) { + return j.CreateTCPServiceWithPort(ctx, tweak, 80) } // CreateUDPService creates a new UDP Service based on the j's // defaults. Callers can provide a function to tweak the Service object before // it is created. -func (j *TestJig) CreateUDPService(tweak func(svc *v1.Service)) (*v1.Service, error) { +func (j *TestJig) CreateUDPService(ctx context.Context, tweak func(svc *v1.Service)) (*v1.Service, error) { svc := j.newServiceTemplate(v1.ProtocolUDP, 80) if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err) } @@ -144,7 +144,7 @@ func (j *TestJig) CreateUDPService(tweak func(svc *v1.Service)) (*v1.Service, er // CreateExternalNameService creates a new ExternalName type Service based on the j's defaults. 
// Callers can provide a function to tweak the Service object before it is created. -func (j *TestJig) CreateExternalNameService(tweak func(svc *v1.Service)) (*v1.Service, error) { +func (j *TestJig) CreateExternalNameService(ctx context.Context, tweak func(svc *v1.Service)) (*v1.Service, error) { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Namespace: j.Namespace, @@ -160,7 +160,7 @@ func (j *TestJig) CreateExternalNameService(tweak func(svc *v1.Service)) (*v1.Se if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err) } @@ -168,9 +168,9 @@ func (j *TestJig) CreateExternalNameService(tweak func(svc *v1.Service)) (*v1.Se } // ChangeServiceType updates the given service's ServiceType to the given newType. -func (j *TestJig) ChangeServiceType(newType v1.ServiceType, timeout time.Duration) error { +func (j *TestJig) ChangeServiceType(ctx context.Context, newType v1.ServiceType, timeout time.Duration) error { ingressIP := "" - svc, err := j.UpdateService(func(s *v1.Service) { + svc, err := j.UpdateService(ctx, func(s *v1.Service) { for _, ing := range s.Status.LoadBalancer.Ingress { if ing.IP != "" { ingressIP = ing.IP @@ -183,7 +183,7 @@ func (j *TestJig) ChangeServiceType(newType v1.ServiceType, timeout time.Duratio return err } if ingressIP != "" { - _, err = j.WaitForLoadBalancerDestroy(ingressIP, int(svc.Spec.Ports[0].Port), timeout) + _, err = j.WaitForLoadBalancerDestroy(ctx, ingressIP, int(svc.Spec.Ports[0].Port), timeout) } return err } @@ -192,9 +192,9 @@ func (j *TestJig) ChangeServiceType(newType v1.ServiceType, timeout time.Duratio // ExternalTrafficPolicy set to Local and sanity checks its nodePort. // If createPod is true, it also creates an RC with 1 replica of // the standard netexec container used everywhere in this test. -func (j *TestJig) CreateOnlyLocalNodePortService(createPod bool) (*v1.Service, error) { +func (j *TestJig) CreateOnlyLocalNodePortService(ctx context.Context, createPod bool) (*v1.Service, error) { ginkgo.By("creating a service " + j.Namespace + "/" + j.Name + " with type=NodePort and ExternalTrafficPolicy=Local") - svc, err := j.CreateTCPService(func(svc *v1.Service) { + svc, err := j.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}} @@ -205,7 +205,7 @@ func (j *TestJig) CreateOnlyLocalNodePortService(createPod bool) (*v1.Service, e if createPod { ginkgo.By("creating a pod to be part of the service " + j.Name) - _, err = j.Run(nil) + _, err = j.Run(ctx, nil) if err != nil { return nil, err } @@ -217,9 +217,9 @@ func (j *TestJig) CreateOnlyLocalNodePortService(createPod bool) (*v1.Service, e // ExternalTrafficPolicy set to Local and waits for it to acquire an ingress IP. // If createPod is true, it also creates an RC with 1 replica of // the standard netexec container used everywhere in this test. 
-func (j *TestJig) CreateOnlyLocalLoadBalancerService(timeout time.Duration, createPod bool, +func (j *TestJig) CreateOnlyLocalLoadBalancerService(ctx context.Context, timeout time.Duration, createPod bool, tweak func(svc *v1.Service)) (*v1.Service, error) { - _, err := j.CreateLoadBalancerService(timeout, func(svc *v1.Service) { + _, err := j.CreateLoadBalancerService(ctx, timeout, func(svc *v1.Service) { ginkgo.By("setting ExternalTrafficPolicy=Local") svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal if tweak != nil { @@ -232,18 +232,18 @@ func (j *TestJig) CreateOnlyLocalLoadBalancerService(timeout time.Duration, crea if createPod { ginkgo.By("creating a pod to be part of the service " + j.Name) - _, err = j.Run(nil) + _, err = j.Run(ctx, nil) if err != nil { return nil, err } } ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name) - return j.WaitForLoadBalancer(timeout) + return j.WaitForLoadBalancer(ctx, timeout) } // CreateLoadBalancerService creates a loadbalancer service and waits // for it to acquire an ingress IP. -func (j *TestJig) CreateLoadBalancerService(timeout time.Duration, tweak func(svc *v1.Service)) (*v1.Service, error) { +func (j *TestJig) CreateLoadBalancerService(ctx context.Context, timeout time.Duration, tweak func(svc *v1.Service)) (*v1.Service, error) { ginkgo.By("creating a service " + j.Namespace + "/" + j.Name + " with type=LoadBalancer") svc := j.newServiceTemplate(v1.ProtocolTCP, 80) svc.Spec.Type = v1.ServiceTypeLoadBalancer @@ -252,25 +252,25 @@ func (j *TestJig) CreateLoadBalancerService(timeout time.Duration, tweak func(sv if tweak != nil { tweak(svc) } - _, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err) } ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name) - return j.WaitForLoadBalancer(timeout) + return j.WaitForLoadBalancer(ctx, timeout) } // GetEndpointNodes returns a map of nodenames:external-ip on which the // endpoints of the Service are running. -func (j *TestJig) GetEndpointNodes() (map[string][]string, error) { - return j.GetEndpointNodesWithIP(v1.NodeExternalIP) +func (j *TestJig) GetEndpointNodes(ctx context.Context) (map[string][]string, error) { + return j.GetEndpointNodesWithIP(ctx, v1.NodeExternalIP) } // GetEndpointNodesWithIP returns a map of nodenames to node addresses of the given type on which the // endpoints of the Service are running. -func (j *TestJig) GetEndpointNodesWithIP(addressType v1.NodeAddressType) (map[string][]string, error) { - nodes, err := j.ListNodesWithEndpoint() +func (j *TestJig) GetEndpointNodesWithIP(ctx context.Context, addressType v1.NodeAddressType) (map[string][]string, error) { - nodes, err := j.ListNodesWithEndpoint(ctx) if err != nil { return nil, err } @@ -283,12 +283,11 @@ func (j *TestJig) GetEndpointNodesWithIP(addressType v1.NodeAddressType) (map[st // ListNodesWithEndpoint returns a list of nodes on which the // endpoints of the given Service are running. 
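A hedged end-to-end sketch (not part of the patch) of driving the context-aware TestJig from a spec; the service name, the 15-minute timeout, and the e2eservice import alias are illustrative assumptions:

ginkgo.It("should serve traffic through a LoadBalancer", func(ctx context.Context) {
	jig := e2eservice.NewTestJig(f.ClientSet, f.Namespace.Name, "lb-example")
	// Every API call and poll inside the jig now uses ctx, so an aborted
	// spec no longer sits out the full load-balancer provisioning timeout.
	svc, err := jig.CreateLoadBalancerService(ctx, 15*time.Minute, nil)
	framework.ExpectNoError(err)
	_, err = jig.Run(ctx, nil)
	framework.ExpectNoError(err)
	framework.Logf("load balancer ingress: %v", svc.Status.LoadBalancer.Ingress)
})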
-func (j *TestJig) ListNodesWithEndpoint() ([]v1.Node, error) { - nodeNames, err := j.GetEndpointNodeNames() +func (j *TestJig) ListNodesWithEndpoint(ctx context.Context) ([]v1.Node, error) { + nodeNames, err := j.GetEndpointNodeNames(ctx) if err != nil { return nil, err } - ctx := context.TODO() allNodes, err := j.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err @@ -304,12 +303,12 @@ func (j *TestJig) ListNodesWithEndpoint() ([]v1.Node, error) { // GetEndpointNodeNames returns a string set of node names on which the // endpoints of the given Service are running. -func (j *TestJig) GetEndpointNodeNames() (sets.String, error) { - err := j.waitForAvailableEndpoint(ServiceEndpointsTimeout) +func (j *TestJig) GetEndpointNodeNames(ctx context.Context) (sets.String, error) { + err := j.waitForAvailableEndpoint(ctx, ServiceEndpointsTimeout) if err != nil { return nil, err } - endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) + endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err) } @@ -328,9 +327,9 @@ func (j *TestJig) GetEndpointNodeNames() (sets.String, error) { } // WaitForEndpointOnNode waits for a service endpoint on the given node. -func (j *TestJig) WaitForEndpointOnNode(nodeName string) error { - return wait.PollImmediate(framework.Poll, KubeProxyLagTimeout, func() (bool, error) { - endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) +func (j *TestJig) WaitForEndpointOnNode(ctx context.Context, nodeName string) error { + return wait.PollImmediateWithContext(ctx, framework.Poll, KubeProxyLagTimeout, func(ctx context.Context) (bool, error) { + endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err) return false, nil @@ -355,7 +354,7 @@ func (j *TestJig) WaitForEndpointOnNode(nodeName string) error { } // waitForAvailableEndpoint waits for at least 1 endpoint to be available till timeout -func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error { +func (j *TestJig) waitForAvailableEndpoint(ctx context.Context, timeout time.Duration) error { //Wait for endpoints to be created, this may take longer time if service backing pods are taking longer time to run endpointSelector := fields.OneTermEqualSelector("metadata.name", j.Name) stopCh := make(chan struct{}) @@ -367,12 +366,12 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = endpointSelector.String() - obj, err := j.Client.CoreV1().Endpoints(j.Namespace).List(context.TODO(), options) + obj, err := j.Client.CoreV1().Endpoints(j.Namespace).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = endpointSelector.String() - return j.Client.CoreV1().Endpoints(j.Namespace).Watch(context.TODO(), options) + return j.Client.CoreV1().Endpoints(j.Namespace).Watch(ctx, options) }, }, &v1.Endpoints{}, @@ -405,12 +404,12 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) 
(runtime.Object, error) { options.LabelSelector = "kubernetes.io/service-name=" + j.Name - obj, err := j.Client.DiscoveryV1().EndpointSlices(j.Namespace).List(context.TODO(), options) + obj, err := j.Client.DiscoveryV1().EndpointSlices(j.Namespace).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = "kubernetes.io/service-name=" + j.Name - return j.Client.DiscoveryV1().EndpointSlices(j.Namespace).Watch(context.TODO(), options) + return j.Client.DiscoveryV1().EndpointSlices(j.Namespace).Watch(ctx, options) }, }, &discoveryv1.EndpointSlice{}, @@ -441,7 +440,7 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error { go esController.Run(stopCh) - err := wait.Poll(1*time.Second, timeout, func() (bool, error) { + err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) { return endpointAvailable && endpointSliceAvailable, nil }) if err != nil { @@ -518,14 +517,14 @@ func needsNodePorts(svc *v1.Service) bool { // UpdateService fetches a service, calls the update function on it, and // then attempts to send the updated service. It tries up to 3 times in the // face of timeouts and conflicts. -func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) { +func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (*v1.Service, error) { for i := 0; i < 3; i++ { - service, err := j.Client.CoreV1().Services(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) + service, err := j.Client.CoreV1().Services(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err) } update(service) - result, err := j.Client.CoreV1().Services(j.Namespace).Update(context.TODO(), service, metav1.UpdateOptions{}) + result, err := j.Client.CoreV1().Services(j.Namespace).Update(ctx, service, metav1.UpdateOptions{}) if err == nil { return j.sanityCheckService(result, service.Spec.Type) } @@ -537,9 +536,9 @@ func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) { } // WaitForNewIngressIP waits for the given service to get a new ingress IP, or returns an error after the given timeout -func (j *TestJig) WaitForNewIngressIP(existingIP string, timeout time.Duration) (*v1.Service, error) { +func (j *TestJig) WaitForNewIngressIP(ctx context.Context, existingIP string, timeout time.Duration) (*v1.Service, error) { framework.Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, j.Name) - service, err := j.waitForCondition(timeout, "have a new ingress IP", func(svc *v1.Service) bool { + service, err := j.waitForCondition(ctx, timeout, "have a new ingress IP", func(svc *v1.Service) bool { if len(svc.Status.LoadBalancer.Ingress) == 0 { return false } @@ -556,14 +555,14 @@ func (j *TestJig) WaitForNewIngressIP(existingIP string, timeout time.Duration) } // ChangeServiceNodePort changes node ports of the given service. 
-func (j *TestJig) ChangeServiceNodePort(initial int) (*v1.Service, error) { +func (j *TestJig) ChangeServiceNodePort(ctx context.Context, initial int) (*v1.Service, error) { var err error var service *v1.Service for i := 1; i < NodePortRange.Size; i++ { offs1 := initial - NodePortRange.Base offs2 := (offs1 + i) % NodePortRange.Size newPort := NodePortRange.Base + offs2 - service, err = j.UpdateService(func(s *v1.Service) { + service, err = j.UpdateService(ctx, func(s *v1.Service) { s.Spec.Ports[0].NodePort = int32(newPort) }) if err != nil && strings.Contains(err.Error(), errAllocated.Error()) { @@ -577,9 +576,9 @@ func (j *TestJig) ChangeServiceNodePort(initial int) (*v1.Service, error) { } // WaitForLoadBalancer waits the given service to have a LoadBalancer, or returns an error after the given timeout -func (j *TestJig) WaitForLoadBalancer(timeout time.Duration) (*v1.Service, error) { +func (j *TestJig) WaitForLoadBalancer(ctx context.Context, timeout time.Duration) (*v1.Service, error) { framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, j.Name) - service, err := j.waitForCondition(timeout, "have a load balancer", func(svc *v1.Service) bool { + service, err := j.waitForCondition(ctx, timeout, "have a load balancer", func(svc *v1.Service) bool { return len(svc.Status.LoadBalancer.Ingress) > 0 }) if err != nil { @@ -596,16 +595,16 @@ func (j *TestJig) WaitForLoadBalancer(timeout time.Duration) (*v1.Service, error } // WaitForLoadBalancerDestroy waits the given service to destroy a LoadBalancer, or returns an error after the given timeout -func (j *TestJig) WaitForLoadBalancerDestroy(ip string, port int, timeout time.Duration) (*v1.Service, error) { +func (j *TestJig) WaitForLoadBalancerDestroy(ctx context.Context, ip string, port int, timeout time.Duration) (*v1.Service, error) { // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable defer func() { - if err := framework.EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { + if err := framework.EnsureLoadBalancerResourcesDeleted(ctx, ip, strconv.Itoa(port)); err != nil { framework.Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) } }() framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, j.Name) - service, err := j.waitForCondition(timeout, "have no load balancer", func(svc *v1.Service) bool { + service, err := j.waitForCondition(ctx, timeout, "have no load balancer", func(svc *v1.Service) bool { return len(svc.Status.LoadBalancer.Ingress) == 0 }) if err != nil { @@ -614,10 +613,10 @@ func (j *TestJig) WaitForLoadBalancerDestroy(ip string, port int, timeout time.D return j.sanityCheckService(service, service.Spec.Type) } -func (j *TestJig) waitForCondition(timeout time.Duration, message string, conditionFn func(*v1.Service) bool) (*v1.Service, error) { +func (j *TestJig) waitForCondition(ctx context.Context, timeout time.Duration, message string, conditionFn func(*v1.Service) bool) (*v1.Service, error) { var service *v1.Service - pollFunc := func() (bool, error) { - svc, err := j.Client.CoreV1().Services(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) + pollFunc := func(ctx context.Context) (bool, error) { + svc, err := j.Client.CoreV1().Services(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Retrying .... 
error trying to get Service %s: %v", j.Name, err) return false, nil @@ -628,7 +627,7 @@ func (j *TestJig) waitForCondition(timeout time.Duration, message string, condit } return false, nil } - if err := wait.PollImmediate(framework.Poll, timeout, pollFunc); err != nil { + if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollFunc); err != nil { return nil, fmt.Errorf("timed out waiting for service %q to %s: %w", j.Name, message, err) } return service, nil @@ -700,13 +699,13 @@ func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) { } // CreatePDB returns a PodDisruptionBudget for the given ReplicationController, or returns an error if a PodDisruptionBudget isn't ready -func (j *TestJig) CreatePDB(rc *v1.ReplicationController) (*policyv1.PodDisruptionBudget, error) { +func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (*policyv1.PodDisruptionBudget, error) { pdb := j.newPDBTemplate(rc) - newPdb, err := j.Client.PolicyV1().PodDisruptionBudgets(j.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) + newPdb, err := j.Client.PolicyV1().PodDisruptionBudgets(j.Namespace).Create(ctx, pdb, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err) } - if err := j.waitForPdbReady(); err != nil { + if err := j.waitForPdbReady(ctx); err != nil { return nil, fmt.Errorf("failed waiting for PDB to be ready: %v", err) } @@ -737,53 +736,53 @@ func (j *TestJig) newPDBTemplate(rc *v1.ReplicationController) *policyv1.PodDisr // Run creates a ReplicationController and Pod(s) and waits for the // Pod(s) to be running. Callers can provide a function to tweak the RC object // before it is created. -func (j *TestJig) Run(tweak func(rc *v1.ReplicationController)) (*v1.ReplicationController, error) { +func (j *TestJig) Run(ctx context.Context, tweak func(rc *v1.ReplicationController)) (*v1.ReplicationController, error) { rc := j.newRCTemplate() if tweak != nil { tweak(rc) } - result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{}) + result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(ctx, rc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err) } - pods, err := j.waitForPodsCreated(int(*(rc.Spec.Replicas))) + pods, err := j.waitForPodsCreated(ctx, int(*(rc.Spec.Replicas))) if err != nil { return nil, fmt.Errorf("failed to create pods: %v", err) } - if err := j.waitForPodsReady(pods); err != nil { + if err := j.waitForPodsReady(ctx, pods); err != nil { return nil, fmt.Errorf("failed waiting for pods to be running: %v", err) } return result, nil } // Scale scales pods to the given replicas -func (j *TestJig) Scale(replicas int) error { +func (j *TestJig) Scale(ctx context.Context, replicas int) error { rc := j.Name - scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(context.TODO(), rc, metav1.GetOptions{}) + scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to get scale for RC %q: %v", rc, err) } scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(replicas) - _, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(context.TODO(), rc, scale, metav1.UpdateOptions{}) + _, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, 
metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to scale RC %q: %v", rc, err) } - pods, err := j.waitForPodsCreated(replicas) + pods, err := j.waitForPodsCreated(ctx, replicas) if err != nil { return fmt.Errorf("failed waiting for pods: %v", err) } - if err := j.waitForPodsReady(pods); err != nil { + if err := j.waitForPodsReady(ctx, pods); err != nil { return fmt.Errorf("failed waiting for pods to be running: %v", err) } return nil } -func (j *TestJig) waitForPdbReady() error { +func (j *TestJig) waitForPdbReady(ctx context.Context) error { timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { - pdb, err := j.Client.PolicyV1().PodDisruptionBudgets(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) + pdb, err := j.Client.PolicyV1().PodDisruptionBudgets(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{}) if err != nil { return err } @@ -795,14 +794,15 @@ func (j *TestJig) waitForPdbReady() error { return fmt.Errorf("timeout waiting for PDB %q to be ready", j.Name) } -func (j *TestJig) waitForPodsCreated(replicas int) ([]string, error) { +func (j *TestJig) waitForPodsCreated(ctx context.Context, replicas int) ([]string, error) { + // TODO (pohly): replace with gomega.Eventually timeout := 2 * time.Minute // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(j.Labels)) framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { + for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(2 * time.Second) { options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := j.Client.CoreV1().Pods(j.Namespace).List(context.TODO(), options) + pods, err := j.Client.CoreV1().Pods(j.Namespace).List(ctx, options) if err != nil { return nil, err } @@ -823,31 +823,31 @@ func (j *TestJig) waitForPodsCreated(replicas int) ([]string, error) { return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas) } -func (j *TestJig) waitForPodsReady(pods []string) error { +func (j *TestJig) waitForPodsReady(ctx context.Context, pods []string) error { timeout := 2 * time.Minute - if !e2epod.CheckPodsRunningReady(j.Client, j.Namespace, pods, timeout) { + if !e2epod.CheckPodsRunningReady(ctx, j.Client, j.Namespace, pods, timeout) { return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods)) } return nil } -func testReachabilityOverServiceName(serviceName string, sp v1.ServicePort, execPod *v1.Pod) error { - return testEndpointReachability(serviceName, sp.Port, sp.Protocol, execPod) +func testReachabilityOverServiceName(ctx context.Context, serviceName string, sp v1.ServicePort, execPod *v1.Pod) error { + return testEndpointReachability(ctx, serviceName, sp.Port, sp.Protocol, execPod) } -func testReachabilityOverClusterIP(clusterIP string, sp v1.ServicePort, execPod *v1.Pod) error { +func testReachabilityOverClusterIP(ctx context.Context, clusterIP string, sp v1.ServicePort, execPod *v1.Pod) error { // If .spec.clusterIP is set to "" or "None" for service, ClusterIP is not created, so reachability can not be tested over clusterIP:servicePort if netutils.ParseIPSloppy(clusterIP) == nil { return fmt.Errorf("unable to parse ClusterIP: %s", clusterIP) } - return testEndpointReachability(clusterIP, sp.Port, sp.Protocol, execPod) + return testEndpointReachability(ctx, clusterIP, sp.Port, sp.Protocol, execPod) } -func 
testReachabilityOverExternalIP(externalIP string, sp v1.ServicePort, execPod *v1.Pod) error { - return testEndpointReachability(externalIP, sp.Port, sp.Protocol, execPod) +func testReachabilityOverExternalIP(ctx context.Context, externalIP string, sp v1.ServicePort, execPod *v1.Pod) error { + return testEndpointReachability(ctx, externalIP, sp.Port, sp.Protocol, execPod) } -func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v1.Pod, clusterIP string, externalIPs bool) error { +func testReachabilityOverNodePorts(ctx context.Context, nodes *v1.NodeList, sp v1.ServicePort, pod *v1.Pod, clusterIP string, externalIPs bool) error { internalAddrs := e2enode.CollectAddresses(nodes, v1.NodeInternalIP) isClusterIPV4 := netutils.IsIPv4String(clusterIP) @@ -864,7 +864,7 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v continue } - err := testEndpointReachability(internalAddr, sp.NodePort, sp.Protocol, pod) + err := testEndpointReachability(ctx, internalAddr, sp.NodePort, sp.Protocol, pod) if err != nil { return err } @@ -876,7 +876,7 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v framework.Logf("skipping testEndpointReachability() for external address %s as it does not match clusterIP (%s) family", externalAddr, clusterIP) continue } - err := testEndpointReachability(externalAddr, sp.NodePort, sp.Protocol, pod) + err := testEndpointReachability(ctx, externalAddr, sp.NodePort, sp.Protocol, pod) if err != nil { return err } @@ -898,7 +898,7 @@ func isInvalidOrLocalhostAddress(ip string) bool { // testEndpointReachability tests reachability to endpoints (i.e. IP, ServiceName) and ports. Test request is initiated from specified execPod. // TCP and UDP protocol based service are supported at this moment // TODO: add support to test SCTP Protocol based services. 
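
The dominant pattern in these hunks is replacing wait.PollImmediate with wait.PollImmediateWithContext, so that the poll loop stops as soon as Ginkgo cancels the test's context. A minimal sketch of that migration (checkOnce stands in for the per-iteration probe, e.g. an Endpoints GET; interval and timeout are placeholders):

package e2eexample

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUntilReady polls checkOnce until it reports done, the timeout expires,
// or ctx is cancelled.
func waitUntilReady(ctx context.Context, checkOnce func(ctx context.Context) (bool, error)) error {
	// Before: wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { ... })
	return wait.PollImmediateWithContext(ctx, time.Second, time.Minute, checkOnce)
}
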
-func testEndpointReachability(endpoint string, port int32, protocol v1.Protocol, execPod *v1.Pod) error { +func testEndpointReachability(ctx context.Context, endpoint string, port int32, protocol v1.Protocol, execPod *v1.Pod) error { ep := net.JoinHostPort(endpoint, strconv.Itoa(int(port))) cmd := "" switch protocol { @@ -910,7 +910,7 @@ func testEndpointReachability(endpoint string, port int32, protocol v1.Protocol, return fmt.Errorf("service reachability check is not supported for %v", protocol) } - err := wait.PollImmediate(1*time.Second, ServiceReachabilityShortPollTimeout, func() (bool, error) { + err := wait.PollImmediateWithContext(ctx, 1*time.Second, ServiceReachabilityShortPollTimeout, func(ctx context.Context) (bool, error) { _, err := e2epodoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { framework.Logf("Service reachability failing with error: %v\nRetrying...", err) @@ -926,28 +926,28 @@ func testEndpointReachability(endpoint string, port int32, protocol v1.Protocol, // checkClusterIPServiceReachability ensures that service of type ClusterIP is reachable over // - ServiceName:ServicePort, ClusterIP:ServicePort -func (j *TestJig) checkClusterIPServiceReachability(svc *v1.Service, pod *v1.Pod) error { +func (j *TestJig) checkClusterIPServiceReachability(ctx context.Context, svc *v1.Service, pod *v1.Pod) error { clusterIP := svc.Spec.ClusterIP servicePorts := svc.Spec.Ports externalIPs := svc.Spec.ExternalIPs - err := j.waitForAvailableEndpoint(ServiceEndpointsTimeout) + err := j.waitForAvailableEndpoint(ctx, ServiceEndpointsTimeout) if err != nil { return err } for _, servicePort := range servicePorts { - err = testReachabilityOverServiceName(svc.Name, servicePort, pod) + err = testReachabilityOverServiceName(ctx, svc.Name, servicePort, pod) if err != nil { return err } - err = testReachabilityOverClusterIP(clusterIP, servicePort, pod) + err = testReachabilityOverClusterIP(ctx, clusterIP, servicePort, pod) if err != nil { return err } if len(externalIPs) > 0 { for _, externalIP := range externalIPs { - err = testReachabilityOverExternalIP(externalIP, servicePort, pod) + err = testReachabilityOverExternalIP(ctx, externalIP, servicePort, pod) if err != nil { return err } @@ -962,31 +962,31 @@ func (j *TestJig) checkClusterIPServiceReachability(svc *v1.Service, pod *v1.Pod // ServiceName:ServicePort, ClusterIP:ServicePort and NodeInternalIPs:NodePort // - External clients should be reachable to service over - // NodePublicIPs:NodePort -func (j *TestJig) checkNodePortServiceReachability(svc *v1.Service, pod *v1.Pod) error { +func (j *TestJig) checkNodePortServiceReachability(ctx context.Context, svc *v1.Service, pod *v1.Pod) error { clusterIP := svc.Spec.ClusterIP servicePorts := svc.Spec.Ports // Consider only 2 nodes for testing - nodes, err := e2enode.GetBoundedReadySchedulableNodes(j.Client, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, j.Client, 2) if err != nil { return err } - err = j.waitForAvailableEndpoint(ServiceEndpointsTimeout) + err = j.waitForAvailableEndpoint(ctx, ServiceEndpointsTimeout) if err != nil { return err } for _, servicePort := range servicePorts { - err = testReachabilityOverServiceName(svc.Name, servicePort, pod) + err = testReachabilityOverServiceName(ctx, svc.Name, servicePort, pod) if err != nil { return err } - err = testReachabilityOverClusterIP(clusterIP, servicePort, pod) + err = testReachabilityOverClusterIP(ctx, clusterIP, servicePort, pod) if err != nil { return err } - err = 
testReachabilityOverNodePorts(nodes, servicePort, pod, clusterIP, j.ExternalIPs) + err = testReachabilityOverNodePorts(ctx, nodes, servicePort, pod, clusterIP, j.ExternalIPs) if err != nil { return err } @@ -997,12 +997,12 @@ func (j *TestJig) checkNodePortServiceReachability(svc *v1.Service, pod *v1.Pod) // checkExternalServiceReachability ensures service of type externalName resolves to IP address and no fake externalName is set // FQDN of kubernetes is used as externalName(for air tight platforms). -func (j *TestJig) checkExternalServiceReachability(svc *v1.Service, pod *v1.Pod) error { +func (j *TestJig) checkExternalServiceReachability(ctx context.Context, svc *v1.Service, pod *v1.Pod) error { // NOTE(claudiub): Windows does not support PQDN. svcName := fmt.Sprintf("%s.%s.svc.%s", svc.Name, svc.Namespace, framework.TestContext.ClusterDNSDomain) // Service must resolve to IP cmd := fmt.Sprintf("nslookup %s", svcName) - return wait.PollImmediate(framework.Poll, ServiceReachabilityShortPollTimeout, func() (done bool, err error) { + return wait.PollImmediateWithContext(ctx, framework.Poll, ServiceReachabilityShortPollTimeout, func(ctx context.Context) (done bool, err error) { _, stderr, err := e2epodoutput.RunHostCmdWithFullOutput(pod.Namespace, pod.Name, cmd) // NOTE(claudiub): nslookup may return 0 on Windows, even though the DNS name was not found. In this case, // we can check stderr for the error. @@ -1015,7 +1015,7 @@ func (j *TestJig) checkExternalServiceReachability(svc *v1.Service, pod *v1.Pod) } // CheckServiceReachability ensures that request are served by the services. Only supports Services with type ClusterIP, NodePort and ExternalName. -func (j *TestJig) CheckServiceReachability(svc *v1.Service, pod *v1.Pod) error { +func (j *TestJig) CheckServiceReachability(ctx context.Context, svc *v1.Service, pod *v1.Pod) error { svcType := svc.Spec.Type _, err := j.sanityCheckService(svc, svcType) @@ -1025,20 +1025,20 @@ func (j *TestJig) CheckServiceReachability(svc *v1.Service, pod *v1.Pod) error { switch svcType { case v1.ServiceTypeClusterIP: - return j.checkClusterIPServiceReachability(svc, pod) + return j.checkClusterIPServiceReachability(ctx, svc, pod) case v1.ServiceTypeNodePort: - return j.checkNodePortServiceReachability(svc, pod) + return j.checkNodePortServiceReachability(ctx, svc, pod) case v1.ServiceTypeExternalName: - return j.checkExternalServiceReachability(svc, pod) + return j.checkExternalServiceReachability(ctx, svc, pod) case v1.ServiceTypeLoadBalancer: - return j.checkClusterIPServiceReachability(svc, pod) + return j.checkClusterIPServiceReachability(ctx, svc, pod) default: return fmt.Errorf("unsupported service type \"%s\" to verify service reachability for \"%s\" service. This may due to diverse implementation of the service type", svcType, svc.Name) } } // CreateServicePods creates a replication controller with the label same as service. Service listens to TCP and UDP. -func (j *TestJig) CreateServicePods(replica int) error { +func (j *TestJig) CreateServicePods(ctx context.Context, replica int) error { config := testutils.RCConfig{ Client: j.Client, Name: j.Name, @@ -1050,18 +1050,18 @@ func (j *TestJig) CreateServicePods(replica int) error { Timeout: framework.PodReadyBeforeTimeout, Replicas: replica, } - return e2erc.RunRC(config) + return e2erc.RunRC(ctx, config) } // CreateSCTPServiceWithPort creates a new SCTP Service with given port based on the // j's defaults. Callers can provide a function to tweak the Service object before // it is created. 
-func (j *TestJig) CreateSCTPServiceWithPort(tweak func(svc *v1.Service), port int32) (*v1.Service, error) { +func (j *TestJig) CreateSCTPServiceWithPort(ctx context.Context, tweak func(svc *v1.Service), port int32) (*v1.Service, error) { svc := j.newServiceTemplate(v1.ProtocolSCTP, port) if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create SCTP Service %q: %v", svc.Name, err) } diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go index 29bae0ee2d5..88b9c9e9904 100644 --- a/test/e2e/framework/service/resource.go +++ b/test/e2e/framework/service/resource.go @@ -65,18 +65,18 @@ func CreateServiceSpec(serviceName, externalName string, isHeadless bool, select // UpdateService fetches a service, calls the update function on it, // and then attempts to send the updated service. It retries up to 2 // times in the face of timeouts and conflicts. -func UpdateService(c clientset.Interface, namespace, serviceName string, update func(*v1.Service)) (*v1.Service, error) { +func UpdateService(ctx context.Context, c clientset.Interface, namespace, serviceName string, update func(*v1.Service)) (*v1.Service, error) { var service *v1.Service var err error for i := 0; i < 3; i++ { - service, err = c.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) + service, err = c.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { return service, err } update(service) - service, err = c.CoreV1().Services(namespace).Update(context.TODO(), service, metav1.UpdateOptions{}) + service, err = c.CoreV1().Services(namespace).Update(ctx, service, metav1.UpdateOptions{}) if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { return service, err @@ -86,8 +86,8 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update } // CleanupServiceResources cleans up service Type=LoadBalancer resources. -func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { - framework.TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone) +func CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) { + framework.TestContext.CloudConfig.Provider.CleanupServiceResources(ctx, c, loadBalancerName, region, zone) } // GetIngressPoint returns a host on which ingress serves. @@ -100,8 +100,8 @@ func GetIngressPoint(ing *v1.LoadBalancerIngress) string { } // GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service. -func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration { - nodes, err := e2enode.GetReadySchedulableNodes(cs) +func GetServiceLoadBalancerCreationTimeout(ctx context.Context, cs clientset.Interface) time.Duration { + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) if len(nodes.Items) > LargeClusterMinNodesNumber { return loadBalancerCreateTimeoutLarge @@ -110,8 +110,8 @@ func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration } // GetServiceLoadBalancerPropagationTimeout returns a timeout value for propagating a load balancer of a service. 
-func GetServiceLoadBalancerPropagationTimeout(cs clientset.Interface) time.Duration { - nodes, err := e2enode.GetReadySchedulableNodes(cs) +func GetServiceLoadBalancerPropagationTimeout(ctx context.Context, cs clientset.Interface) time.Duration { + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) if len(nodes.Items) > LargeClusterMinNodesNumber { return loadBalancerPropagationTimeoutLarge @@ -120,10 +120,10 @@ func GetServiceLoadBalancerPropagationTimeout(cs clientset.Interface) time.Durat } // CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once. -func CreateServiceForSimpleAppWithPods(c clientset.Interface, contPort int, svcPort int, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (*v1.Service, error) { +func CreateServiceForSimpleAppWithPods(ctx context.Context, c clientset.Interface, contPort int, svcPort int, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (*v1.Service, error) { var err error - theService := CreateServiceForSimpleApp(c, contPort, svcPort, namespace, appName) - e2enode.CreatePodsPerNodeForSimpleApp(c, namespace, appName, podSpec, count) + theService := CreateServiceForSimpleApp(ctx, c, contPort, svcPort, namespace, appName) + e2enode.CreatePodsPerNodeForSimpleApp(ctx, c, namespace, appName, podSpec, count) if block { err = testutils.WaitForPodsWithLabelRunning(c, namespace, labels.SelectorFromSet(labels.Set(theService.Spec.Selector))) } @@ -131,7 +131,7 @@ func CreateServiceForSimpleAppWithPods(c clientset.Interface, contPort int, svcP } // CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label. -func CreateServiceForSimpleApp(c clientset.Interface, contPort, svcPort int, namespace, appName string) *v1.Service { +func CreateServiceForSimpleApp(ctx context.Context, c clientset.Interface, contPort, svcPort int, namespace, appName string) *v1.Service { if appName == "" { panic(fmt.Sprintf("no app name provided")) } @@ -152,7 +152,7 @@ func CreateServiceForSimpleApp(c clientset.Interface, contPort, svcPort int, nam }} } framework.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName) - service, err := c.CoreV1().Services(namespace).Create(context.TODO(), &v1.Service{ + service, err := c.CoreV1().Services(namespace).Create(ctx, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "service-for-" + appName, Labels: map[string]string{ diff --git a/test/e2e/framework/service/util.go b/test/e2e/framework/service/util.go index dcea55be35b..5b809acd075 100644 --- a/test/e2e/framework/service/util.go +++ b/test/e2e/framework/service/util.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "time" "k8s.io/apimachinery/pkg/util/wait" @@ -25,13 +26,13 @@ import ( ) // TestReachableHTTP tests that the given host serves HTTP on the given port. -func TestReachableHTTP(host string, port int, timeout time.Duration) { - TestReachableHTTPWithRetriableErrorCodes(host, port, []int{}, timeout) +func TestReachableHTTP(ctx context.Context, host string, port int, timeout time.Duration) { + TestReachableHTTPWithRetriableErrorCodes(ctx, host, port, []int{}, timeout) } // TestReachableHTTPWithRetriableErrorCodes tests that the given host serves HTTP on the given port with the given retriableErrCodes. 
-func TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) { - pollfn := func() (bool, error) { +func TestReachableHTTPWithRetriableErrorCodes(ctx context.Context, host string, port int, retriableErrCodes []int, timeout time.Duration) { + pollfn := func(ctx context.Context) (bool, error) { result := e2enetwork.PokeHTTP(host, port, "/echo?msg=hello", &e2enetwork.HTTPPokeParams{ BodyContains: "hello", @@ -43,7 +44,7 @@ func TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableEr return false, nil // caller can retry } - if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollfn); err != nil { if err == wait.ErrWaitTimeout { framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout) } else { diff --git a/test/e2e/framework/service/wait.go b/test/e2e/framework/service/wait.go index a46d9b8db7f..577acf251fa 100644 --- a/test/e2e/framework/service/wait.go +++ b/test/e2e/framework/service/wait.go @@ -31,15 +31,15 @@ import ( ) // WaitForServiceDeletedWithFinalizer waits for the service with finalizer to be deleted. -func WaitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name string) { +func WaitForServiceDeletedWithFinalizer(ctx context.Context, cs clientset.Interface, namespace, name string) { ginkgo.By("Delete service with finalizer") - if err := cs.CoreV1().Services(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { + if err := cs.CoreV1().Services(namespace).Delete(ctx, name, metav1.DeleteOptions{}); err != nil { framework.Failf("Failed to delete service %s/%s", namespace, name) } ginkgo.By("Wait for service to disappear") - if pollErr := wait.PollImmediate(LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) { - svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if pollErr := wait.PollImmediateWithContext(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), func(ctx context.Context) (bool, error) { + svc, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("Service %s/%s is gone.", namespace, name) @@ -56,10 +56,10 @@ func WaitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name // WaitForServiceUpdatedWithFinalizer waits for the service to be updated to have or // don't have a finalizer. 
-func WaitForServiceUpdatedWithFinalizer(cs clientset.Interface, namespace, name string, hasFinalizer bool) { +func WaitForServiceUpdatedWithFinalizer(ctx context.Context, cs clientset.Interface, namespace, name string, hasFinalizer bool) { ginkgo.By(fmt.Sprintf("Wait for service to hasFinalizer=%t", hasFinalizer)) - if pollErr := wait.PollImmediate(LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) { - svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if pollErr := wait.PollImmediateWithContext(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), func(ctx context.Context) (bool, error) { + svc, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/framework/skipper/skipper.go b/test/e2e/framework/skipper/skipper.go index abf6e3387a1..913225c9f0d 100644 --- a/test/e2e/framework/skipper/skipper.go +++ b/test/e2e/framework/skipper/skipper.go @@ -103,9 +103,9 @@ func SkipIfFeatureGateEnabled(gate featuregate.Feature) { } // SkipIfMissingResource skips if the gvr resource is missing. -func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) { +func SkipIfMissingResource(ctx context.Context, dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) { resourceClient := dynamicClient.Resource(gvr).Namespace(namespace) - _, err := resourceClient.List(context.TODO(), metav1.ListOptions{}) + _, err := resourceClient.List(ctx, metav1.ListOptions{}) if err != nil { // not all resources support list, so we ignore those if apierrors.IsMethodNotSupported(err) || apierrors.IsNotFound(err) || apierrors.IsForbidden(err) { @@ -144,8 +144,8 @@ func SkipUnlessProviderIs(supportedProviders ...string) { } // SkipUnlessMultizone skips if the cluster does not have multizone. -func SkipUnlessMultizone(c clientset.Interface) { - zones, err := e2enode.GetClusterZones(c) +func SkipUnlessMultizone(ctx context.Context, c clientset.Interface) { + zones, err := e2enode.GetClusterZones(ctx, c) if err != nil { skipInternalf(1, "Error listing cluster zones") } @@ -155,8 +155,8 @@ func SkipUnlessMultizone(c clientset.Interface) { } // SkipIfMultizone skips if the cluster has multizone. 
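
WaitForServiceDeletedWithFinalizer above is an instance of the "poll until the object is gone" pattern; with the context threaded through, it looks roughly like the sketch below. Interval and timeout are placeholders, not the framework's LoadBalancer constants.

package e2eexample

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForServiceGone polls until the Service returns NotFound, the timeout
// expires, or ctx is cancelled. Transient errors simply trigger another poll.
func waitForServiceGone(ctx context.Context, cs clientset.Interface, namespace, name string) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
		_, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // service is gone
		}
		return false, nil // keep waiting
	})
}
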
-func SkipIfMultizone(c clientset.Interface) { - zones, err := e2enode.GetClusterZones(c) +func SkipIfMultizone(ctx context.Context, c clientset.Interface) { + zones, err := e2enode.GetClusterZones(ctx, c) if err != nil { skipInternalf(1, "Error listing cluster zones") } @@ -243,11 +243,11 @@ func RunIfSystemSpecNameIs(names ...string) { } // SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem run if the component run as pods and client can delete them -func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(componentName string, c clientset.Interface, ns string, labelSet labels.Set) { +func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx context.Context, componentName string, c clientset.Interface, ns string, labelSet labels.Set) { // verify if component run as pod label := labels.SelectorFromSet(labelSet) listOpts := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts) + pods, err := c.CoreV1().Pods(ns).List(ctx, listOpts) framework.Logf("SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem: %v, %v", pods, err) if err != nil { skipInternalf(1, "Skipped because client failed to get component:%s pod err:%v", componentName, err) @@ -259,7 +259,7 @@ func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(componentName string, c // verify if client can delete pod pod := pods.Items[0] - if err := c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}); err != nil { + if err := c.CoreV1().Pods(ns).Delete(ctx, pod.Name, metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}); err != nil { skipInternalf(1, "Skipped because client failed to delete component:%s pod, err:%v", componentName, err) } } diff --git a/test/e2e/framework/skipper/skipper_test.go b/test/e2e/framework/skipper/skipper_test.go index 1fb58784db5..c0c4cd44bcf 100644 --- a/test/e2e/framework/skipper/skipper_test.go +++ b/test/e2e/framework/skipper/skipper_test.go @@ -17,7 +17,6 @@ limitations under the License. package skipper_test import ( - "context" "flag" "testing" @@ -47,10 +46,11 @@ import ( // // // +// // This must be line #50. var _ = ginkgo.Describe("e2e", func() { - ginkgo.It("skips", func(ctx context.Context) { + ginkgo.It("skips", func() { e2eskipper.Skipf("skipping %d, %d, %d", 1, 3, 4) }) }) diff --git a/test/e2e/framework/ssh/ssh.go b/test/e2e/framework/ssh/ssh.go index fa95475961f..f5cf1c6591a 100644 --- a/test/e2e/framework/ssh/ssh.go +++ b/test/e2e/framework/ssh/ssh.go @@ -119,8 +119,8 @@ func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) { // looking for internal IPs. If it can't find an internal IP for every node it // returns an error, though it still returns all hosts that it found in that // case. -func NodeSSHHosts(c clientset.Interface) ([]string, error) { - nodelist := waitListSchedulableNodesOrDie(c) +func NodeSSHHosts(ctx context.Context, c clientset.Interface) ([]string, error) { + nodelist := waitListSchedulableNodesOrDie(ctx, c) hosts := nodeAddresses(nodelist, v1.NodeExternalIP) // If ExternalIPs aren't available for all nodes, try falling back to the InternalIPs. @@ -188,14 +188,14 @@ type Result struct { // NodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name, // eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across // cloud providers since it involves ssh. 
-func NodeExec(nodeName, cmd, provider string) (Result, error) { - return SSH(cmd, net.JoinHostPort(nodeName, SSHPort), provider) +func NodeExec(ctx context.Context, nodeName, cmd, provider string) (Result, error) { + return SSH(ctx, cmd, net.JoinHostPort(nodeName, SSHPort), provider) } // SSH synchronously SSHs to a node running on provider and runs cmd. If there // is no error performing the SSH, the stdout, stderr, and exit code are // returned. -func SSH(cmd, host, provider string) (Result, error) { +func SSH(ctx context.Context, cmd, host, provider string) (Result, error) { result := Result{Host: host, Cmd: cmd} // Get a signer for the provider. @@ -212,14 +212,14 @@ func SSH(cmd, host, provider string) (Result, error) { } if bastion := os.Getenv(sshBastionEnvKey); len(bastion) > 0 { - stdout, stderr, code, err := runSSHCommandViaBastion(cmd, result.User, bastion, host, signer) + stdout, stderr, code, err := runSSHCommandViaBastion(ctx, cmd, result.User, bastion, host, signer) result.Stdout = stdout result.Stderr = stderr result.Code = code return result, err } - stdout, stderr, code, err := runSSHCommand(cmd, result.User, host, signer) + stdout, stderr, code, err := runSSHCommand(ctx, cmd, result.User, host, signer) result.Stdout = stdout result.Stderr = stderr result.Code = code @@ -229,7 +229,7 @@ func SSH(cmd, host, provider string) (Result, error) { // runSSHCommandViaBastion returns the stdout, stderr, and exit code from running cmd on // host as specific user, along with any SSH-level error. -func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, int, error) { +func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signer) (string, string, int, error) { if user == "" { user = os.Getenv("USER") } @@ -241,7 +241,7 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i } client, err := ssh.Dial("tcp", host, config) if err != nil { - err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { + err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) { fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err) if client, err = ssh.Dial("tcp", host, config); err != nil { return false, nil // retrying, error will be logged above @@ -285,7 +285,7 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i // host as specific user, along with any SSH-level error. It uses an SSH proxy to connect // to bastion, then via that tunnel connects to the remote host. Similar to // sshutil.RunSSHCommand but scoped to the needs of the test infrastructure. -func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer) (string, string, int, error) { +func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host string, signer ssh.Signer) (string, string, int, error) { // Setup the config, dial the server, and open a session. 
config := &ssh.ClientConfig{ User: user, @@ -295,7 +295,7 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer) } bastionClient, err := ssh.Dial("tcp", bastion, config) if err != nil { - err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { + err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) { fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, bastion, err) if bastionClient, err = ssh.Dial("tcp", bastion, config); err != nil { return false, err @@ -359,7 +359,7 @@ func LogResult(result Result) { } // IssueSSHCommandWithResult tries to execute a SSH command and returns the execution result -func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) { +func IssueSSHCommandWithResult(ctx context.Context, cmd, provider string, node *v1.Node) (*Result, error) { framework.Logf("Getting external IP address for %s", node.Name) host := "" for _, a := range node.Status.Addresses { @@ -384,7 +384,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er } framework.Logf("SSH %q on %s(%s)", cmd, node.Name, host) - result, err := SSH(cmd, host, provider) + result, err := SSH(ctx, cmd, host, provider) LogResult(result) if result.Code != 0 || err != nil { @@ -396,8 +396,8 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er } // IssueSSHCommand tries to execute a SSH command -func IssueSSHCommand(cmd, provider string, node *v1.Node) error { - _, err := IssueSSHCommandWithResult(cmd, provider, node) +func IssueSSHCommand(ctx context.Context, cmd, provider string, node *v1.Node) error { + _, err := IssueSSHCommandWithResult(ctx, cmd, provider, node) if err != nil { return err } @@ -419,11 +419,11 @@ func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string } // waitListSchedulableNodes is a wrapper around listing nodes supporting retries. -func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { +func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) { var nodes *v1.NodeList var err error - if wait.PollImmediate(pollNodeInterval, singleCallTimeout, func() (bool, error) { - nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ + if wait.PollImmediateWithContext(ctx, pollNodeInterval, singleCallTimeout, func(ctx context.Context) (bool, error) { + nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -437,8 +437,8 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { } // waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries. -func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList { - nodes, err := waitListSchedulableNodes(c) +func waitListSchedulableNodesOrDie(ctx context.Context, c clientset.Interface) *v1.NodeList { + nodes, err := waitListSchedulableNodes(ctx, c) if err != nil { expectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.") } diff --git a/test/e2e/framework/statefulset/fixtures.go b/test/e2e/framework/statefulset/fixtures.go index 9f77e1fb0b5..ca184661a14 100644 --- a/test/e2e/framework/statefulset/fixtures.go +++ b/test/e2e/framework/statefulset/fixtures.go @@ -17,6 +17,7 @@ limitations under the License. 
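
The ssh.go changes above wrap the dial retry in wait.PollWithContext so that an aborted test no longer burns the full retry window. A sketch of that shape, with illustrative names:

package e2eexample

import (
	"context"
	"time"

	"golang.org/x/crypto/ssh"
	"k8s.io/apimachinery/pkg/util/wait"
)

// dialWithRetry tries ssh.Dial and, on failure, retries every 5s for up to
// 20s, giving up early if ctx is cancelled.
func dialWithRetry(ctx context.Context, host string, config *ssh.ClientConfig) (*ssh.Client, error) {
	client, err := ssh.Dial("tcp", host, config)
	if err != nil {
		err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
			if client, err = ssh.Dial("tcp", host, config); err != nil {
				return false, nil // retry
			}
			return true, nil
		})
	}
	return client, err
}
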
package statefulset import ( + "context" "fmt" "reflect" "regexp" @@ -153,8 +154,8 @@ func PauseNewPods(ss *appsv1.StatefulSet) { // It fails the test if it finds any pods that are not in phase Running, // or if it finds more than one paused Pod existing at the same time. // This is a no-op if there are no paused pods. -func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) { - podList := GetPodList(c, ss) +func ResumeNextPod(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) { + podList := GetPodList(ctx, c, ss) resumedPod := "" for _, pod := range podList.Items { if pod.Status.Phase != v1.PodRunning { diff --git a/test/e2e/framework/statefulset/rest.go b/test/e2e/framework/statefulset/rest.go index 3ca7647a5be..81ae1c16214 100644 --- a/test/e2e/framework/statefulset/rest.go +++ b/test/e2e/framework/statefulset/rest.go @@ -38,7 +38,7 @@ import ( ) // CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create. -func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.StatefulSet { +func CreateStatefulSet(ctx context.Context, c clientset.Interface, manifestPath, ns string) *appsv1.StatefulSet { mkpath := func(file string) string { return filepath.Join(manifestPath, file) } @@ -51,28 +51,28 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S framework.ExpectNoError(err) framework.Logf(fmt.Sprintf("creating " + ss.Name + " service")) - _, err = c.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err = c.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)) - _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) + _, err = c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) return ss } // GetPodList gets the current Pods in ss. -func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *v1.PodList { +func GetPodList(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) *v1.PodList { selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector) framework.ExpectNoError(err) - podList, err := c.CoreV1().Pods(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + podList, err := c.CoreV1().Pods(ss.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err) return podList } // DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns. -func DeleteAllStatefulSets(c clientset.Interface, ns string) { - ssList, err := c.AppsV1().StatefulSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) +func DeleteAllStatefulSets(ctx context.Context, c clientset.Interface, ns string) { + ssList, err := c.AppsV1().StatefulSets(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) framework.ExpectNoError(err) // Scale down each statefulset, then delete it completely. 
@@ -81,14 +81,14 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { for i := range ssList.Items { ss := &ssList.Items[i] var err error - if ss, err = Scale(c, ss, 0); err != nil { + if ss, err = Scale(ctx, c, ss, 0); err != nil { errList = append(errList, fmt.Sprintf("%v", err)) } - WaitForStatusReplicas(c, ss, 0) + WaitForStatusReplicas(ctx, c, ss, 0) framework.Logf("Deleting statefulset %v", ss.Name) // Use OrphanDependents=false so it's deleted synchronously. // We already made sure the Pods are gone inside Scale(). - if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil { + if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ctx, ss.Name, metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil { errList = append(errList, fmt.Sprintf("%v", err)) } } @@ -96,8 +96,8 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { // pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs. pvNames := sets.NewString() // TODO: Don't assume all pvcs in the ns belong to a statefulset - pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pvcPollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) { + pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvcs, retrying %v", err) return false, nil @@ -106,7 +106,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { pvNames.Insert(pvc.Spec.VolumeName) // TODO: Double check that there are no pods referencing the pvc framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName) - if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvc.Name, metav1.DeleteOptions{}); err != nil { return false, nil } } @@ -116,8 +116,8 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion.")) } - pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - pvList, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) { + pvList, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvs, retrying %v", err) return false, nil @@ -143,16 +143,16 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { } // Scale scales ss to count replicas. 
-func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1.StatefulSet, error) { +func Scale(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1.StatefulSet, error) { name := ss.Name ns := ss.Namespace framework.Logf("Scaling statefulset %s to %d", name, count) - ss = update(c, ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count }) + ss = update(ctx, c, ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count }) var statefulPodList *v1.PodList - pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - statefulPodList = GetPodList(c, ss) + pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) { + statefulPodList = GetPodList(ctx, c, ss) if int32(len(statefulPodList.Items)) == count { return true, nil } @@ -172,26 +172,26 @@ func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1. } // UpdateReplicas updates the replicas of ss to count. -func UpdateReplicas(c clientset.Interface, ss *appsv1.StatefulSet, count int32) { - update(c, ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count }) +func UpdateReplicas(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, count int32) { + update(ctx, c, ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count }) } // Restart scales ss to 0 and then back to its previous number of replicas. -func Restart(c clientset.Interface, ss *appsv1.StatefulSet) { +func Restart(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) { oldReplicas := *(ss.Spec.Replicas) - ss, err := Scale(c, ss, 0) + ss, err := Scale(ctx, c, ss, 0) framework.ExpectNoError(err) // Wait for controller to report the desired number of Pods. // This way we know the controller has observed all Pod deletions // before we scale it back up. - WaitForStatusReplicas(c, ss, 0) - update(c, ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas }) + WaitForStatusReplicas(ctx, c, ss, 0) + update(ctx, c, ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas }) } // CheckHostname verifies that all Pods in ss have the correct Hostname. If the returned error is not nil than verification failed. -func CheckHostname(c clientset.Interface, ss *appsv1.StatefulSet) error { +func CheckHostname(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) error { cmd := "printf $(hostname)" - podList := GetPodList(c, ss) + podList := GetPodList(ctx, c, ss) for _, statefulPod := range podList.Items { hostname, err := e2epodoutput.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) if err != nil { @@ -205,7 +205,7 @@ func CheckHostname(c clientset.Interface, ss *appsv1.StatefulSet) error { } // CheckMount checks that the mount at mountPath is valid for all Pods in ss. 
-func CheckMount(c clientset.Interface, ss *appsv1.StatefulSet, mountPath string) error { +func CheckMount(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, mountPath string) error { for _, cmd := range []string{ // Print inode, size etc fmt.Sprintf("ls -idlh %v", mountPath), @@ -214,7 +214,7 @@ func CheckMount(c clientset.Interface, ss *appsv1.StatefulSet, mountPath string) // Try writing fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))), } { - if err := ExecInStatefulPods(c, ss, cmd); err != nil { + if err := ExecInStatefulPods(ctx, c, ss, cmd); err != nil { return fmt.Errorf("failed to execute %v, error: %v", cmd, err) } } @@ -234,8 +234,8 @@ func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error } // ExecInStatefulPods executes cmd in all Pods in ss. If a error occurs it is returned and cmd is not execute in any subsequent Pods. -func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd string) error { - podList := GetPodList(c, ss) +func ExecInStatefulPods(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, cmd string) error { + podList := GetPodList(ctx, c, ss) for _, statefulPod := range podList.Items { stdout, err := e2epodoutput.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) framework.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout) @@ -247,14 +247,14 @@ func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd strin } // update updates a statefulset, and it is only used within rest.go -func update(c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet { +func update(ctx context.Context, c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet { for i := 0; i < 3; i++ { - ss, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { framework.Failf("failed to get statefulset %q: %v", name, err) } update(ss) - ss, err = c.AppsV1().StatefulSets(ns).Update(context.TODO(), ss, metav1.UpdateOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Update(ctx, ss, metav1.UpdateOptions{}) if err == nil { return ss } diff --git a/test/e2e/framework/statefulset/wait.go b/test/e2e/framework/statefulset/wait.go index c78f2fdcfec..f05c50dc353 100644 --- a/test/e2e/framework/statefulset/wait.go +++ b/test/e2e/framework/statefulset/wait.go @@ -31,10 +31,10 @@ import ( // WaitForRunning waits for numPodsRunning in ss to be Running and for the first // numPodsReady ordinals to be Ready. 
-func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, ss *appsv1.StatefulSet) { - pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, - func() (bool, error) { - podList := GetPodList(c, ss) +func WaitForRunning(ctx context.Context, c clientset.Interface, numPodsRunning, numPodsReady int32, ss *appsv1.StatefulSet) { + pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, + func(ctx context.Context) (bool, error) { + podList := GetPodList(ctx, c, ss) SortStatefulPods(podList) if int32(len(podList.Items)) < numPodsRunning { framework.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning) @@ -60,14 +60,14 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s } // WaitForState periodically polls for the ss and its pods until the until function returns either true or an error -func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) { - pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, - func() (bool, error) { - ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{}) +func WaitForState(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) { + pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, + func(ctx context.Context) (bool, error) { + ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(ctx, ss.Name, metav1.GetOptions{}) if err != nil { return false, err } - podList := GetPodList(c, ssGet) + podList := GetPodList(ctx, c, ssGet) return until(ssGet, podList) }) if pollErr != nil { @@ -76,14 +76,14 @@ func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*app } // WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready. -func WaitForRunningAndReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) { - WaitForRunning(c, numStatefulPods, numStatefulPods, ss) +func WaitForRunningAndReady(ctx context.Context, c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) { + WaitForRunning(ctx, c, numStatefulPods, numStatefulPods, ss) } // WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition. 
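
The wait helpers above still hand-roll PollImmediateWithContext loops; the TODO earlier in waitForPodsCreated points at gomega.Eventually as a later cleanup. A rough sketch of that direction, assuming a Gomega release with context-aware async assertions; the function name, selector, and counts are illustrative:

package e2eexample

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// expectPodCount polls the pod count for a label selector until it equals
// replicas; the poll stops when ctx is cancelled.
func expectPodCount(ctx context.Context, g gomega.Gomega, c clientset.Interface, ns, labelSelector string, replicas int) {
	g.Eventually(ctx, func(ctx context.Context) (int, error) {
		pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
		if err != nil {
			return 0, err
		}
		return len(pods.Items), nil
	}).WithTimeout(2*time.Minute).WithPolling(2*time.Second).Should(gomega.Equal(replicas))
}
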
-func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { +func WaitForPodReady(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { var pods *v1.PodList - WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 for i := range pods.Items { @@ -97,13 +97,13 @@ func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName str } // WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas -func WaitForStatusReadyReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) { +func WaitForStatusReadyReplicas(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) { framework.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas) ns, name := ss.Namespace, ss.Name - pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, - func() (bool, error) { - ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, + func(ctx context.Context) (bool, error) { + ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -122,13 +122,13 @@ func WaitForStatusReadyReplicas(c clientset.Interface, ss *appsv1.StatefulSet, e } // WaitForStatusAvailableReplicas waits for the ss.Status.Available to be equal to expectedReplicas -func WaitForStatusAvailableReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) { +func WaitForStatusAvailableReplicas(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) { framework.Logf("Waiting for statefulset status.AvailableReplicas updated to %d", expectedReplicas) ns, name := ss.Namespace, ss.Name - pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, - func() (bool, error) { - ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, + func(ctx context.Context) (bool, error) { + ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -147,13 +147,13 @@ func WaitForStatusAvailableReplicas(c clientset.Interface, ss *appsv1.StatefulSe } // WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas -func WaitForStatusReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) { +func WaitForStatusReplicas(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) { framework.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas) ns, name := ss.Namespace, ss.Name - pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, - func() (bool, error) { - ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, + func(ctx context.Context) (bool, error) { + ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -172,12 +172,12 @@ func WaitForStatusReplicas(c 
clientset.Interface, ss *appsv1.StatefulSet, expect } // Saturate waits for all Pods in ss to become Running and Ready. -func Saturate(c clientset.Interface, ss *appsv1.StatefulSet) { +func Saturate(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) { var i int32 for i = 0; i < *(ss.Spec.Replicas); i++ { framework.Logf("Waiting for stateful pod at index %v to enter Running", i) - WaitForRunning(c, i+1, i, ss) + WaitForRunning(ctx, c, i+1, i, ss) framework.Logf("Resuming stateful pod at index %v", i) - ResumeNextPod(c, ss) + ResumeNextPod(ctx, c, ss) } } diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index 6108cddf740..03a94c2e980 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -17,6 +17,7 @@ limitations under the License. package framework import ( + "context" "crypto/rand" "encoding/base64" "errors" @@ -214,8 +215,10 @@ type NodeKillerConfig struct { JitterFactor float64 // SimulatedDowntime is a duration between node is killed and recreated. SimulatedDowntime time.Duration - // NodeKillerStopCh is a channel that is used to notify NodeKiller to stop killing nodes. - NodeKillerStopCh chan struct{} + // NodeKillerStopCtx is a context that is used to notify NodeKiller to stop killing nodes. + NodeKillerStopCtx context.Context + // NodeKillerStop is the cancel function for NodeKillerStopCtx. + NodeKillerStop func() } // NodeTestContextType is part of TestContextType, it is shared by all node e2e test. diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 011cf67132c..964dbc6fc2f 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -146,7 +146,7 @@ var ( var RunID = uuid.NewUUID() // CreateTestingNSFn is a func that is responsible for creating namespace used for executing e2e tests. -type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) +type CreateTestingNSFn func(ctx context.Context, baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) // APIAddress returns a address of an instance. func APIAddress() string { @@ -198,9 +198,9 @@ func NodeOSArchIs(supportedNodeOsArchs ...string) bool { // DeleteNamespaces deletes all namespaces that match the given delete and skip filters. // Filter is by simple strings.Contains; first skip filter, then delete filter. // Returns the list of deleted namespaces or an error. -func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { +func DeleteNamespaces(ctx context.Context, c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { ginkgo.By("Deleting namespaces") - nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) + nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) ExpectNoError(err, "Failed to get namespace list") var deleted []string var wg sync.WaitGroup @@ -228,7 +228,7 @@ OUTER: go func(nsName string) { defer wg.Done() defer ginkgo.GinkgoRecover() - gomega.Expect(c.CoreV1().Namespaces().Delete(context.TODO(), nsName, metav1.DeleteOptions{})).To(gomega.Succeed()) + gomega.Expect(c.CoreV1().Namespaces().Delete(ctx, nsName, metav1.DeleteOptions{})).To(gomega.Succeed()) Logf("namespace : %v api call to delete is complete ", nsName) }(item.Name) } @@ -237,16 +237,16 @@ OUTER: } // WaitForNamespacesDeleted waits for the namespaces to be deleted. 
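[editor's note] The NodeKillerConfig hunk above swaps a stop channel for a context/cancel pair. A small sketch of how the producer and consumer sides line up under that change; the names here are illustrative, not the NodeKiller API, and in the framework the context would be derived from the test context rather than context.Background():

// sketch_stop.go - a stop-channel loop mapped onto the new context fields.
package sketch

import "context"

// runUntilStopped stands in for the node-killing loop; previously it would
// select on a NodeKillerStopCh channel, now it watches ctx.Done().
func runUntilStopped(ctx context.Context, killOnce func()) {
	for {
		select {
		case <-ctx.Done(): // was: case <-stopCh:
			return
		default:
			killOnce()
		}
	}
}

// newStopPair mirrors the two config fields: the context handed to the loop
// and the cancel function that replaces close(stopCh).
func newStopPair() (context.Context, func()) {
	ctx, cancel := context.WithCancel(context.Background())
	return ctx, cancel
}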
-func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error { +func WaitForNamespacesDeleted(ctx context.Context, c clientset.Interface, namespaces []string, timeout time.Duration) error { ginkgo.By(fmt.Sprintf("Waiting for namespaces %+v to vanish", namespaces)) nsMap := map[string]bool{} for _, ns := range namespaces { nsMap[ns] = true } //Now POLL until all namespaces have been eradicated. - return wait.Poll(2*time.Second, timeout, - func() (bool, error) { - nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) + return wait.PollWithContext(ctx, 2*time.Second, timeout, + func(ctx context.Context) (bool, error) { + nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -259,20 +259,20 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou }) } -func waitForConfigMapInNamespace(c clientset.Interface, ns, name string, timeout time.Duration) error { +func waitForConfigMapInNamespace(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error { fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String() + ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, timeout) + defer cancel() lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) { options.FieldSelector = fieldSelector - return c.CoreV1().ConfigMaps(ns).List(context.TODO(), options) + return c.CoreV1().ConfigMaps(ns).List(ctx, options) }, WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.FieldSelector = fieldSelector - return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), options) + return c.CoreV1().ConfigMaps(ns).Watch(ctx, options) }, } - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) - defer cancel() _, err := watchtools.UntilWithSync(ctx, lw, &v1.ConfigMap{}, nil, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: @@ -285,20 +285,20 @@ func waitForConfigMapInNamespace(c clientset.Interface, ns, name string, timeout return err } -func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error { +func waitForServiceAccountInNamespace(ctx context.Context, c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error { fieldSelector := fields.OneTermEqualSelector("metadata.name", serviceAccountName).String() + ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, timeout) + defer cancel() lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) { options.FieldSelector = fieldSelector - return c.CoreV1().ServiceAccounts(ns).List(context.TODO(), options) + return c.CoreV1().ServiceAccounts(ns).List(ctx, options) }, WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { options.FieldSelector = fieldSelector - return c.CoreV1().ServiceAccounts(ns).Watch(context.TODO(), options) + return c.CoreV1().ServiceAccounts(ns).Watch(ctx, options) }, } - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) - defer cancel() _, err := watchtools.UntilWithSync(ctx, lw, &v1.ServiceAccount{}, nil, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: @@ -317,20 +317,20 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN // WaitForDefaultServiceAccountInNamespace waits for the 
default service account to be provisioned // the default service account is what is associated with pods when they do not specify a service account // as a result, pods are not able to be provisioned in a namespace until the service account is provisioned -func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error { - return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout) +func WaitForDefaultServiceAccountInNamespace(ctx context.Context, c clientset.Interface, namespace string) error { + return waitForServiceAccountInNamespace(ctx, c, namespace, "default", ServiceAccountProvisionTimeout) } // WaitForKubeRootCAInNamespace waits for the configmap kube-root-ca.crt containing the service account // CA trust bundle to be provisioned in the specified namespace so that pods do not have to retry mounting // the config map (which creates noise that hides other issues in the Kubelet). -func WaitForKubeRootCAInNamespace(c clientset.Interface, namespace string) error { - return waitForConfigMapInNamespace(c, namespace, "kube-root-ca.crt", ServiceAccountProvisionTimeout) +func WaitForKubeRootCAInNamespace(ctx context.Context, c clientset.Interface, namespace string) error { + return waitForConfigMapInNamespace(ctx, c, namespace, "kube-root-ca.crt", ServiceAccountProvisionTimeout) } // CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name. // Please see NewFramework instead of using this directly. -func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) { +func CreateTestingNS(ctx context.Context, baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) { if labels == nil { labels = map[string]string{} } @@ -351,9 +351,9 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s } // Be robust about making the namespace creation call. var got *v1.Namespace - if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { + if err := wait.PollImmediateWithContext(ctx, Poll, 30*time.Second, func(ctx context.Context) (bool, error) { var err error - got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj, metav1.CreateOptions{}) + got, err = c.CoreV1().Namespaces().Create(ctx, namespaceObj, metav1.CreateOptions{}) if err != nil { if apierrors.IsAlreadyExists(err) { // regenerate on conflict @@ -370,7 +370,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s } if TestContext.VerifyServiceAccount { - if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil { + if err := WaitForDefaultServiceAccountInNamespace(ctx, c, got.Name); err != nil { // Even if we fail to create serviceAccount in the namespace, // we have successfully create a namespace. // So, return the created namespace. @@ -382,7 +382,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s // CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state // and waits until they are finally deleted. It ignores namespace skip. 
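[editor's note] In the waitForConfigMapInNamespace and waitForServiceAccountInNamespace hunks above, the watchtools.ContextWithOptionalTimeout call moves ahead of the ListWatch literal so that the List/Watch closures capture the timeout-bounded context derived from the Ginkgo one, instead of context.TODO(). A minimal sketch of the resulting shape, assuming client-go's tools/cache and tools/watch packages; the condition logic is simplified and illustrative:

// sketch_watch.go - derive the bounded context first, then build the ListWatch.
package sketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

func waitForConfigMap(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error {
	fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String()
	// Derive the bounded context before building the ListWatch so the closures
	// below use it rather than a background context.
	ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, timeout)
	defer cancel()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return c.CoreV1().ConfigMaps(ns).List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return c.CoreV1().ConfigMaps(ns).Watch(ctx, options)
		},
	}
	_, err := watchtools.UntilWithSync(ctx, lw, &v1.ConfigMap{}, nil, func(event watch.Event) (bool, error) {
		// Stop as soon as the ConfigMap shows up; anything else keeps waiting.
		return event.Type == watch.Added || event.Type == watch.Modified, nil
	})
	return err
}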
-func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { +func CheckTestingNSDeletedExcept(ctx context.Context, c clientset.Interface, skip string) error { // TODO: Since we don't have support for bulk resource deletion in the API, // while deleting a namespace we are deleting all objects from that namespace // one by one (one deletion == one API call). This basically exposes us to @@ -398,7 +398,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { Logf("Waiting for terminating namespaces to be deleted...") for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) { - namespaces, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) + namespaces, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { Logf("Listing namespaces failed: %v", err) continue @@ -420,10 +420,10 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { } // WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. -func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { - return wait.Poll(interval, timeout, func() (bool, error) { +func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { + return wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) - list, err := c.CoreV1().Endpoints(namespace).List(context.TODO(), metav1.ListOptions{}) + list, err := c.CoreV1().Endpoints(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -547,8 +547,8 @@ func TryKill(cmd *exec.Cmd) { // EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created // are actually cleaned up. Currently only implemented for GCE/GKE. -func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { - return TestContext.CloudConfig.Provider.EnsureLoadBalancerResourcesDeleted(ip, portRange) +func EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error { + return TestContext.CloudConfig.Provider.EnsureLoadBalancerResourcesDeleted(ctx, ip, portRange) } // CoreDump SSHs to the master and all nodes and dumps their logs into dir. @@ -613,11 +613,11 @@ func RunCmdEnv(env []string, command string, args ...string) (string, string, er // getControlPlaneAddresses returns the externalIP, internalIP and hostname fields of control plane nodes. // If any of these is unavailable, empty slices are returned. -func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []string) { +func getControlPlaneAddresses(ctx context.Context, c clientset.Interface) ([]string, []string, []string) { var externalIPs, internalIPs, hostnames []string // Populate the internal IPs. - eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { Failf("Failed to get kubernetes endpoints: %v", err) } @@ -647,8 +647,8 @@ func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []stri // It may return internal and external IPs, even if we expect for // e.g. 
internal IPs to be used (issue #56787), so that we can be // sure to block the control plane fully during tests. -func GetControlPlaneAddresses(c clientset.Interface) []string { - externalIPs, internalIPs, _ := getControlPlaneAddresses(c) +func GetControlPlaneAddresses(ctx context.Context, c clientset.Interface) []string { + externalIPs, internalIPs, _ := getControlPlaneAddresses(ctx, c) ips := sets.NewString() switch TestContext.Provider { @@ -685,7 +685,7 @@ func PrettyPrintJSON(metrics interface{}) string { // WatchEventSequenceVerifier ... // manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure // -// testContext cancellation signal across API boundaries, e.g: context.TODO() +// ctx cancellation signal across API boundaries, e.g: context from Ginkgo // dc sets up a client to the API // resourceType specify the type of resource // namespace select a namespace diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index ad3739fda5a..a880465b8c2 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -149,7 +149,7 @@ type Test struct { } // NewNFSServer is a NFS-specific wrapper for CreateStorageServer. -func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) { +func NewNFSServer(ctx context.Context, cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) { config = TestConfig{ Namespace: namespace, Prefix: "nfs", @@ -161,7 +161,7 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf if len(args) > 0 { config.ServerArgs = args } - pod, host = CreateStorageServer(cs, config) + pod, host = CreateStorageServer(ctx, cs, config) if strings.Contains(host, ":") { host = "[" + host + "]" } @@ -171,8 +171,8 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf // CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer // and ip address string are returned. // Note: Expect() is called so no error is returned. -func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) { - pod = startVolumeServer(cs, config) +func CreateStorageServer(ctx context.Context, cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) { + pod = startVolumeServer(ctx, cs, config) gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil") ip = pod.Status.PodIP gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name)) @@ -182,7 +182,7 @@ func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod // GetVolumeAttachmentName returns the hash value of the provisioner, the config ClientNodeSelection name, // and the VolumeAttachment name of the PV that is bound to the PVC with the passed in claimName and claimNamespace. -func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string { +func GetVolumeAttachmentName(ctx context.Context, cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string { var nodeName string // For provisioning tests, ClientNodeSelection is not set so we do not know the NodeName of the VolumeAttachment of the PV that is // bound to the PVC with the passed in claimName and claimNamespace. 
We need this NodeName because it is used to generate the @@ -190,9 +190,9 @@ func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisio // To get the nodeName of the VolumeAttachment, we get all the VolumeAttachments, look for the VolumeAttachment with a // PersistentVolumeName equal to the PV that is bound to the passed in PVC, and then we get the NodeName from that VolumeAttachment. if config.ClientNodeSelection.Name == "" { - claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{}) + claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{}) pvName := claim.Spec.VolumeName - volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{}) + volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{}) for _, volumeAttachment := range volumeAttachments.Items { if *volumeAttachment.Spec.Source.PersistentVolumeName == pvName { nodeName = volumeAttachment.Spec.NodeName @@ -202,21 +202,21 @@ func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisio } else { nodeName = config.ClientNodeSelection.Name } - handle := getVolumeHandle(cs, claimName, claimNamespace) + handle := getVolumeHandle(ctx, cs, claimName, claimNamespace) attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, provisioner, nodeName))) return fmt.Sprintf("csi-%x", attachmentHash) } // getVolumeHandle returns the VolumeHandle of the PV that is bound to the PVC with the passed in claimName and claimNamespace. -func getVolumeHandle(cs clientset.Interface, claimName string, claimNamespace string) string { +func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName string, claimNamespace string) string { // re-get the claim to the latest state with bound volume - claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{}) + claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, "Cannot get PVC") return "" } pvName := claim.Spec.VolumeName - pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, "Cannot get PV") return "" @@ -229,9 +229,9 @@ func getVolumeHandle(cs clientset.Interface, claimName string, claimNamespace st } // WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated. -func WaitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Interface, timeout time.Duration) error { - waitErr := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) { - _, err := cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{}) +func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error { + waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) { + _, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{}) if err != nil { // if the volumeattachment object is not found, it means it has been terminated. 
if apierrors.IsNotFound(err) { @@ -250,7 +250,7 @@ func WaitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Inter // startVolumeServer starts a container specified by config.serverImage and exports all // config.serverPorts from it. The returned pod should be used to get the server // IP address and create appropriate VolumeSource. -func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { +func startVolumeServer(ctx context.Context, client clientset.Interface, config TestConfig) *v1.Pod { podClient := client.CoreV1().Pods(config.Namespace) portCount := len(config.ServerPorts) @@ -331,13 +331,13 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { } var pod *v1.Pod - serverPod, err := podClient.Create(context.TODO(), serverPod, metav1.CreateOptions{}) + serverPod, err := podClient.Create(ctx, serverPod, metav1.CreateOptions{}) // ok if the server pod already exists. TODO: make this controllable by callers if err != nil { if apierrors.IsAlreadyExists(err) { framework.Logf("Ignore \"already-exists\" error, re-get pod...") ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName)) - serverPod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{}) + serverPod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err) pod = serverPod } else { @@ -345,13 +345,13 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { } } if config.WaitForCompletion { - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace)) - framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, metav1.DeleteOptions{})) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, client, serverPod.Name, serverPod.Namespace)) + framework.ExpectNoError(podClient.Delete(ctx, serverPod.Name, metav1.DeleteOptions{})) } else { - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, client, serverPod)) if pod == nil { ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName)) - pod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{}) + pod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err) } } @@ -363,7 +363,7 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { } // TestServerCleanup cleans server pod. 
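[editor's note] The startVolumeServer hunk above keeps its create-or-reuse behaviour while threading ctx through both the Create and the fallback Get. A compact sketch of that step, assuming apierrors from k8s.io/apimachinery/pkg/api/errors; the helper name is illustrative:

// sketch_server_pod.go - create the server pod, reusing one that already exists.
package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func createOrGetPod(ctx context.Context, c clientset.Interface, ns string, pod *v1.Pod) (*v1.Pod, error) {
	created, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	if err == nil {
		return created, nil
	}
	if apierrors.IsAlreadyExists(err) {
		// Another invocation created the server pod first; fetch and reuse it.
		return c.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{})
	}
	return nil, err
}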
-func TestServerCleanup(f *framework.Framework, config TestConfig) { +func TestServerCleanup(ctx context.Context, f *framework.Framework, config TestConfig) { ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix)) defer ginkgo.GinkgoRecover() @@ -371,11 +371,11 @@ func TestServerCleanup(f *framework.Framework, config TestConfig) { return } - err := e2epod.DeletePodWithWaitByName(f.ClientSet, config.Prefix+"-server", config.Namespace) + err := e2epod.DeletePodWithWaitByName(ctx, f.ClientSet, config.Prefix+"-server", config.Namespace) gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace) } -func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) { +func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) { ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix)) var gracePeriod int64 = 1 var command string @@ -453,18 +453,18 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC }) } podsNamespacer := client.CoreV1().Pods(config.Namespace) - clientPod, err := podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{}) + clientPod, err := podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{}) if err != nil { return nil, err } if slow { - err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow) } else { - err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStart) } if err != nil { - e2epod.DeletePodOrFail(client, clientPod.Namespace, clientPod.Name) - e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete) + e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name) + _ = e2epod.WaitForPodToDisappear(ctx, client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete) return nil, err } return clientPod, nil @@ -519,8 +519,8 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string // Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet), // pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.podStartTimeout. // It should be used for cases where "regular" dynamic provisioning of an empty volume is requested. -func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) { - testVolumeClient(f, config, fsGroup, fsType, tests, false) +func TestVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) { + testVolumeClient(ctx, f, config, fsGroup, fsType, tests, false) } // TestVolumeClientSlow is the same as TestVolumeClient except for its timeout. 
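[editor's note] In the runVolumeTesterPod hunk above, the failure path now deletes the client pod using the spec context and explicitly discards the WaitForPodToDisappear result, so the original start-up error is the one returned. A condensed, purely illustrative sketch of that error-path shape; the function parameters stand in for the framework's pod create/wait/delete helpers:

// sketch_cleanup.go - best-effort cleanup with an explicitly discarded result.
package sketch

import "context"

func startClient(ctx context.Context,
	create func(context.Context) (string, error),
	waitRunning func(context.Context, string) error,
	deleteAndWaitGone func(context.Context, string) error,
) (string, error) {
	name, err := create(ctx)
	if err != nil {
		return "", err
	}
	if err := waitRunning(ctx, name); err != nil {
		// Best-effort cleanup: discard its error so the start-up failure surfaces.
		_ = deleteAndWaitGone(ctx, name)
		return "", err
	}
	return name, nil
}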
@@ -528,21 +528,21 @@ func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, // pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.slowPodStartTimeout. // It should be used for cases where "special" dynamic provisioning is requested, such as volume cloning // or snapshot restore. -func TestVolumeClientSlow(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) { - testVolumeClient(f, config, fsGroup, fsType, tests, true) +func TestVolumeClientSlow(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) { + testVolumeClient(ctx, f, config, fsGroup, fsType, tests, true) } -func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) { +func testVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) { timeouts := f.Timeouts - clientPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow) + clientPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow) if err != nil { framework.Failf("Failed to create client pod: %v", err) } defer func() { // testVolumeClient might get used more than once per test, therefore // we have to clean up before returning. - e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name) - framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)) + e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name) + framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)) }() testVolumeContent(f, clientPod, "", fsGroup, fsType, tests) @@ -553,7 +553,7 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, } ec.Resources = v1.ResourceRequirements{} ec.Name = "volume-ephemeral-container" - err = e2epod.NewPodClient(f).AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart) + err = e2epod.NewPodClient(f).AddEphemeralContainerSync(ctx, clientPod, ec, timeouts.PodStart) // The API server will return NotFound for the subresource when the feature is disabled framework.ExpectNoError(err, "failed to add ephemeral container for re-test") testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests) @@ -562,13 +562,13 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, // InjectContent inserts index.html with given content into given volume. It does so by // starting and auxiliary pod which writes the file there. // The volume must be writable. 
-func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) { +func InjectContent(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) { privileged := true timeouts := f.Timeouts if framework.NodeOSDistroIs("windows") { privileged = false } - injectorPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/) + injectorPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/) if err != nil { framework.Failf("Failed to create injector pod: %v", err) return @@ -576,8 +576,8 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs defer func() { // This pod must get deleted before the function returns becaue the test relies on // the volume not being in use. - e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name) - framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)) + e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name) + framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)) }() ginkgo.By("Writing text file contents in the container.") diff --git a/test/e2e/instrumentation/core_events.go b/test/e2e/instrumentation/core_events.go index a7b74275255..a4e64c18dd2 100644 --- a/test/e2e/instrumentation/core_events.go +++ b/test/e2e/instrumentation/core_events.go @@ -62,7 +62,7 @@ var _ = common.SIGDescribe("Events", func() { ginkgo.By("creating a test event") // create a test event in test namespace - _, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Create(context.TODO(), &v1.Event{ + _, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Create(ctx, &v1.Event{ ObjectMeta: metav1.ObjectMeta{ Name: eventTestName, Labels: map[string]string{ @@ -81,7 +81,7 @@ var _ = common.SIGDescribe("Events", func() { ginkgo.By("listing all events in all namespaces") // get a list of Events in all namespaces to ensure endpoint coverage - eventsList, err := f.ClientSet.CoreV1().Events("").List(context.TODO(), metav1.ListOptions{ + eventsList, err := f.ClientSet.CoreV1().Events("").List(ctx, metav1.ListOptions{ LabelSelector: "testevent-constant=true", }) framework.ExpectNoError(err, "failed list all events") @@ -107,18 +107,18 @@ var _ = common.SIGDescribe("Events", func() { }) framework.ExpectNoError(err, "failed to marshal the patch JSON payload") - _, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Patch(context.TODO(), eventTestName, types.StrategicMergePatchType, []byte(eventPatch), metav1.PatchOptions{}) + _, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Patch(ctx, eventTestName, types.StrategicMergePatchType, []byte(eventPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch the test event") ginkgo.By("fetching the test event") // get event by name - event, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(context.TODO(), eventCreatedName, metav1.GetOptions{}) + event, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(ctx, eventCreatedName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch the test event") framework.ExpectEqual(event.Message, eventPatchMessage, "test event message does not match patch message") 
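[editor's note] The core_events.go hunks above, like the remaining instrumentation changes below, use the ctx that Ginkgo v2 passes into the spec body rather than context.TODO(). A minimal sketch of where that ctx comes from, assuming the e2e framework helpers; the spec text and namespace handling are illustrative:

// sketch_spec_ctx.go - the spec-level context that replaces context.TODO().
package sketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// The body accepts a context.Context, which Ginkgo cancels when the spec is
// interrupted or times out, and every API call is made with that ctx.
var _ = ginkgo.Describe("events sketch", func() {
	f := framework.NewDefaultFramework("events-sketch")

	ginkgo.It("lists events with the spec context", func(ctx context.Context) {
		_, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
		framework.ExpectNoError(err, "failed to list events")
	})
})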
ginkgo.By("updating the test event") - testEvent, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(context.TODO(), event.Name, metav1.GetOptions{}) + testEvent, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(ctx, event.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get test event") testEvent.Series = &v1.EventSeries{ @@ -130,11 +130,11 @@ var _ = common.SIGDescribe("Events", func() { testEvent.ObjectMeta.ResourceVersion = "" testEvent.ObjectMeta.ManagedFields = nil - _, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Update(context.TODO(), testEvent, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Update(ctx, testEvent, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update the test event") ginkgo.By("getting the test event") - event, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(context.TODO(), testEvent.Name, metav1.GetOptions{}) + event, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(ctx, testEvent.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get test event") // clear ResourceVersion and ManagedFields which are set by control-plane event.ObjectMeta.ResourceVersion = "" @@ -145,12 +145,12 @@ var _ = common.SIGDescribe("Events", func() { ginkgo.By("deleting the test event") // delete original event - err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Delete(context.TODO(), eventCreatedName, metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Delete(ctx, eventCreatedName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete the test event") ginkgo.By("listing all events in all namespaces") // get a list of Events list namespace - eventsList, err = f.ClientSet.CoreV1().Events("").List(context.TODO(), metav1.ListOptions{ + eventsList, err = f.ClientSet.CoreV1().Events("").List(ctx, metav1.ListOptions{ LabelSelector: "testevent-constant=true", }) framework.ExpectNoError(err, "fail to list all events") @@ -179,7 +179,7 @@ var _ = common.SIGDescribe("Events", func() { // create a test event in test namespace for _, eventTestName := range eventTestNames { eventMessage := "This is " + eventTestName - _, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Create(context.TODO(), &v1.Event{ + _, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Create(ctx, &v1.Event{ ObjectMeta: metav1.ObjectMeta{ Name: eventTestName, @@ -199,7 +199,7 @@ var _ = common.SIGDescribe("Events", func() { ginkgo.By("get a list of Events with a label in the current namespace") // get a list of events - eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: "testevent-set=true", }) framework.ExpectNoError(err, "failed to get a list of events") @@ -210,25 +210,25 @@ var _ = common.SIGDescribe("Events", func() { // delete collection framework.Logf("requesting DeleteCollection of events") - err = f.ClientSet.CoreV1().Events(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ + err = f.ClientSet.CoreV1().Events(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: "testevent-set=true"}) framework.ExpectNoError(err, "failed to delete the test event") ginkgo.By("check that the list of events matches the requested quantity") - err = wait.PollImmediate(eventRetryPeriod, eventRetryTimeout, 
checkEventListQuantity(f, "testevent-set=true", 0)) + err = wait.PollImmediateWithContext(ctx, eventRetryPeriod, eventRetryTimeout, checkEventListQuantity(f, "testevent-set=true", 0)) framework.ExpectNoError(err, "failed to count required events") }) }) -func checkEventListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) { - return func() (bool, error) { +func checkEventListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { var err error framework.Logf("requesting list of events to confirm quantity") - eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: label}) if err != nil { diff --git a/test/e2e/instrumentation/events.go b/test/e2e/instrumentation/events.go index 0086ea9d826..493feae1140 100644 --- a/test/e2e/instrumentation/events.go +++ b/test/e2e/instrumentation/events.go @@ -60,8 +60,8 @@ func newTestEvent(namespace, name, label string) *eventsv1.Event { } } -func eventExistsInList(client typedeventsv1.EventInterface, namespace, name string) bool { - eventsList, err := client.List(context.TODO(), metav1.ListOptions{ +func eventExistsInList(ctx context.Context, client typedeventsv1.EventInterface, namespace, name string) bool { + eventsList, err := client.List(ctx, metav1.ListOptions{ LabelSelector: "testevent-constant=true", }) framework.ExpectNoError(err, "failed to list events") @@ -99,37 +99,37 @@ var _ = common.SIGDescribe("Events API", func() { eventName := "event-test" ginkgo.By("creating a test event") - _, err := client.Create(context.TODO(), newTestEvent(f.Namespace.Name, eventName, "testevent-constant"), metav1.CreateOptions{}) + _, err := client.Create(ctx, newTestEvent(f.Namespace.Name, eventName, "testevent-constant"), metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create test event") ginkgo.By("listing events in all namespaces") - foundCreatedEvent := eventExistsInList(clientAllNamespaces, f.Namespace.Name, eventName) + foundCreatedEvent := eventExistsInList(ctx, clientAllNamespaces, f.Namespace.Name, eventName) if !foundCreatedEvent { framework.Failf("Failed to find test event %s in namespace %s, in list with cluster scope", eventName, f.Namespace.Name) } ginkgo.By("listing events in test namespace") - foundCreatedEvent = eventExistsInList(client, f.Namespace.Name, eventName) + foundCreatedEvent = eventExistsInList(ctx, client, f.Namespace.Name, eventName) if !foundCreatedEvent { framework.Failf("Failed to find test event %s in namespace %s, in list with namespace scope", eventName, f.Namespace.Name) } ginkgo.By("listing events with field selection filtering on source") - filteredCoreV1List, err := coreClient.List(context.TODO(), metav1.ListOptions{FieldSelector: "source=test-controller"}) + filteredCoreV1List, err := coreClient.List(ctx, metav1.ListOptions{FieldSelector: "source=test-controller"}) framework.ExpectNoError(err, "failed to get filtered list") if len(filteredCoreV1List.Items) != 1 || filteredCoreV1List.Items[0].Name != eventName { framework.Failf("expected single event, got %#v", filteredCoreV1List.Items) } ginkgo.By("listing events with field selection filtering on reportingController") - filteredEventsV1List, err := client.List(context.TODO(), metav1.ListOptions{FieldSelector: "reportingController=test-controller"}) + filteredEventsV1List, 
err := client.List(ctx, metav1.ListOptions{FieldSelector: "reportingController=test-controller"}) framework.ExpectNoError(err, "failed to get filtered list") if len(filteredEventsV1List.Items) != 1 || filteredEventsV1List.Items[0].Name != eventName { framework.Failf("expected single event, got %#v", filteredEventsV1List.Items) } ginkgo.By("getting the test event") - testEvent, err := client.Get(context.TODO(), eventName, metav1.GetOptions{}) + testEvent, err := client.Get(ctx, eventName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get test event") ginkgo.By("patching the test event") @@ -146,11 +146,11 @@ var _ = common.SIGDescribe("Events API", func() { patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, eventsv1.Event{}) framework.ExpectNoError(err, "failed to create two-way merge patch") - _, err = client.Patch(context.TODO(), eventName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.Patch(ctx, eventName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch the test event") ginkgo.By("getting the test event") - event, err := client.Get(context.TODO(), eventName, metav1.GetOptions{}) + event, err := client.Get(ctx, eventName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get test event") // clear ResourceVersion and ManagedFields which are set by control-plane event.ObjectMeta.ResourceVersion = "" @@ -168,11 +168,11 @@ var _ = common.SIGDescribe("Events API", func() { Count: 100, LastObservedTime: metav1.MicroTime{Time: time.Unix(1505828956, 0)}, } - _, err = client.Update(context.TODO(), testEvent, metav1.UpdateOptions{}) + _, err = client.Update(ctx, testEvent, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update the test event") ginkgo.By("getting the test event") - event, err = client.Get(context.TODO(), eventName, metav1.GetOptions{}) + event, err = client.Get(ctx, eventName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get test event") // clear ResourceVersion and ManagedFields which are set by control-plane event.ObjectMeta.ResourceVersion = "" @@ -182,17 +182,17 @@ var _ = common.SIGDescribe("Events API", func() { } ginkgo.By("deleting the test event") - err = client.Delete(context.TODO(), eventName, metav1.DeleteOptions{}) + err = client.Delete(ctx, eventName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete the test event") ginkgo.By("listing events in all namespaces") - foundCreatedEvent = eventExistsInList(clientAllNamespaces, f.Namespace.Name, eventName) + foundCreatedEvent = eventExistsInList(ctx, clientAllNamespaces, f.Namespace.Name, eventName) if foundCreatedEvent { framework.Failf("Should not have found test event %s in namespace %s, in list with cluster scope after deletion", eventName, f.Namespace.Name) } ginkgo.By("listing events in test namespace") - foundCreatedEvent = eventExistsInList(client, f.Namespace.Name, eventName) + foundCreatedEvent = eventExistsInList(ctx, client, f.Namespace.Name, eventName) if foundCreatedEvent { framework.Failf("Should not have found test event %s in namespace %s, in list with namespace scope after deletion", eventName, f.Namespace.Name) } @@ -209,12 +209,12 @@ var _ = common.SIGDescribe("Events API", func() { ginkgo.By("Create set of events") for _, eventName := range eventNames { - _, err := client.Create(context.TODO(), newTestEvent(f.Namespace.Name, eventName, "testevent-set"), metav1.CreateOptions{}) + _, err := 
client.Create(ctx, newTestEvent(f.Namespace.Name, eventName, "testevent-set"), metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create event") } ginkgo.By("get a list of Events with a label in the current namespace") - eventList, err := client.List(context.TODO(), metav1.ListOptions{ + eventList, err := client.List(ctx, metav1.ListOptions{ LabelSelector: "testevent-set=true", }) framework.ExpectNoError(err, "failed to get a list of events") @@ -222,13 +222,13 @@ var _ = common.SIGDescribe("Events API", func() { ginkgo.By("delete a list of events") framework.Logf("requesting DeleteCollection of events") - err = client.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ + err = client.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: "testevent-set=true", }) framework.ExpectNoError(err, "failed to delete the test event") ginkgo.By("check that the list of events matches the requested quantity") - eventList, err = client.List(context.TODO(), metav1.ListOptions{ + eventList, err = client.List(ctx, metav1.ListOptions{ LabelSelector: "testevent-set=true", }) framework.ExpectNoError(err, "failed to get a list of events") diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go index 60096ebfe2f..f7a7c60bc15 100644 --- a/test/e2e/instrumentation/logging/generic_soak.go +++ b/test/e2e/instrumentation/logging/generic_soak.go @@ -67,7 +67,7 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti defer ginkgo.GinkgoRecover() wave := fmt.Sprintf("wave%v", strconv.Itoa(i)) framework.Logf("Starting logging soak, wave = %v", wave) - RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime) + RunLogPodsWithSleepOf(ctx, f, kbRateInSeconds, wave, totalLogTime) framework.Logf("Completed logging soak, wave %v", i) }(i) // Niceness. @@ -80,9 +80,9 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti // RunLogPodsWithSleepOf creates a pod on every node, logs continuously (with "sleep" pauses), and verifies that the log string // was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs. -func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) { +func RunLogPodsWithSleepOf(ctx context.Context, f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) { - nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) totalPods := len(nodes.Items) framework.ExpectNotEqual(totalPods, 0) @@ -91,6 +91,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname appName := "logging-soak" + podname podlables := e2enode.CreatePodsPerNodeForSimpleApp( + ctx, f.ClientSet, f.Namespace.Name, appName, @@ -127,7 +128,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname ) largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete. - pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness) + pods, err := logSoakVerification.WaitFor(ctx, totalPods, timeout+largeClusterForgiveness) if err != nil { framework.Failf("Error in wait... 
%v", err) diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go index a74a3a82eca..0c0358a2eeb 100644 --- a/test/e2e/instrumentation/monitoring/accelerator.go +++ b/test/e2e/instrumentation/monitoring/accelerator.go @@ -57,15 +57,14 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func(ctx context.Context) { - testStackdriverAcceleratorMonitoring(f) + testStackdriverAcceleratorMonitoring(ctx, f) }) }) -func testStackdriverAcceleratorMonitoring(f *framework.Framework) { +func testStackdriverAcceleratorMonitoring(ctx context.Context, f *framework.Framework) { projectID := framework.TestContext.CloudConfig.ProjectID - ctx := context.Background() client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) framework.ExpectNoError(err) @@ -80,9 +79,9 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) { gcmService.BasePath = basePathOverride } - scheduling.SetupNVIDIAGPUNode(f, false) + scheduling.SetupNVIDIAGPUNode(ctx, f, false) - e2epod.NewPodClient(f).Create(&v1.Pod{ + e2epod.NewPodClient(f).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: rcName, }, diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index f4d1ebf50f9..b03dfae881d 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -68,7 +68,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { restMapper.Reset() apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient) customMetricsClient := customclient.NewForConfig(config, restMapper, apiVersionsGetter) - testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel) + testCustomMetrics(ctx, f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel) }) ginkgo.It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func(ctx context.Context) { @@ -83,7 +83,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { restMapper.Reset() apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient) customMetricsClient := customclient.NewForConfig(config, restMapper, apiVersionsGetter) - testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel) + testCustomMetrics(ctx, f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel) }) ginkgo.It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func(ctx context.Context) { @@ -93,14 +93,13 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { framework.Failf("Failed to load config: %s", err) } externalMetricsClient := externalclient.NewForConfigOrDie(config) - testExternalMetrics(f, kubeClient, externalMetricsClient) + testExternalMetrics(ctx, f, kubeClient, externalMetricsClient) }) }) -func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient, adapterDeployment string) { +func testCustomMetrics(ctx context.Context, f *framework.Framework, kubeClient 
clientset.Interface, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient, adapterDeployment string) { projectID := framework.TestContext.CloudConfig.ProjectID - ctx := context.Background() client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) framework.ExpectNoError(err) @@ -122,14 +121,14 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c } ginkgo.DeferCleanup(CleanupAdapter, adapterDeployment) - _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{}) + _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, HPAPermissions, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create ClusterRoleBindings: %v", err) } ginkgo.DeferCleanup(kubeClient.RbacV1().ClusterRoleBindings().Delete, HPAPermissions.Name, metav1.DeleteOptions{}) // Run application that exports the metric - _, err = createSDExporterPods(f, kubeClient) + _, err = createSDExporterPods(ctx, f, kubeClient) if err != nil { framework.Failf("Failed to create stackdriver-exporter pod: %s", err) } @@ -144,10 +143,9 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c } // TODO(kawych): migrate this test to new resource model -func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, externalMetricsClient externalclient.ExternalMetricsClient) { +func testExternalMetrics(ctx context.Context, f *framework.Framework, kubeClient clientset.Interface, externalMetricsClient externalclient.ExternalMetricsClient) { projectID := framework.TestContext.CloudConfig.ProjectID - ctx := context.Background() client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) framework.ExpectNoError(err) @@ -170,14 +168,14 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, } ginkgo.DeferCleanup(CleanupAdapter, AdapterForOldResourceModel) - _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{}) + _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, HPAPermissions, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create ClusterRoleBindings: %v", err) } ginkgo.DeferCleanup(kubeClient.RbacV1().ClusterRoleBindings().Delete, HPAPermissions.Name, metav1.DeleteOptions{}) // Run application that exports the metric - pod, err := createSDExporterPods(f, kubeClient) + pod, err := createSDExporterPods(ctx, f, kubeClient) if err != nil { framework.Failf("Failed to create stackdriver-exporter pod: %s", err) } @@ -259,22 +257,22 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric } } -func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) { - err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod1, metav1.DeleteOptions{}) +func cleanupSDExporterPod(ctx context.Context, f *framework.Framework, cs clientset.Interface) { + err := cs.CoreV1().Pods(f.Namespace.Name).Delete(ctx, stackdriverExporterPod1, metav1.DeleteOptions{}) if err != nil { framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err) } - err = cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod2, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(f.Namespace.Name).Delete(ctx, stackdriverExporterPod2, metav1.DeleteOptions{}) if err != nil { framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err) } } -func createSDExporterPods(f 
*framework.Framework, cs clientset.Interface) (*v1.Pod, error) { - pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue), metav1.CreateOptions{}) +func createSDExporterPods(ctx context.Context, f *framework.Framework, cs clientset.Interface) (*v1.Pod, error) { + pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(ctx, StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue), metav1.CreateOptions{}) if err != nil { return nil, err } - _, err = cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue), metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(f.Namespace.Name).Create(ctx, StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue), metav1.CreateOptions{}) return pod, err } diff --git a/test/e2e/instrumentation/monitoring/metrics_grabber.go b/test/e2e/instrumentation/monitoring/metrics_grabber.go index b3bea127270..4d2df772a05 100644 --- a/test/e2e/instrumentation/monitoring/metrics_grabber.go +++ b/test/e2e/instrumentation/monitoring/metrics_grabber.go @@ -39,12 +39,12 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var c, ec clientset.Interface var grabber *e2emetrics.Grabber - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { var err error c = f.ClientSet ec = f.KubemarkExternalClusterClientSet - gomega.Eventually(func() error { - grabber, err = e2emetrics.NewMetricsGrabber(c, ec, f.ClientConfig(), true, true, true, true, true, true) + gomega.Eventually(ctx, func() error { + grabber, err = e2emetrics.NewMetricsGrabber(ctx, c, ec, f.ClientConfig(), true, true, true, true, true, true) if err != nil { return fmt.Errorf("failed to create metrics grabber: %v", err) } @@ -54,7 +54,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { ginkgo.It("should grab all metrics from API server.", func(ctx context.Context) { ginkgo.By("Connecting to /metrics endpoint") - response, err := grabber.GrabFromAPIServer() + response, err := grabber.GrabFromAPIServer(ctx) if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { e2eskipper.Skipf("%v", err) } @@ -64,19 +64,19 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { ginkgo.It("should grab all metrics from a Kubelet.", func(ctx context.Context) { ginkgo.By("Proxying to Node through the API server") - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { e2eskipper.Skipf("%v", err) } framework.ExpectNoError(err) - response, err := grabber.GrabFromKubelet(node.Name) + response, err := grabber.GrabFromKubelet(ctx, node.Name) framework.ExpectNoError(err) gomega.Expect(response).NotTo(gomega.BeEmpty()) }) ginkgo.It("should grab all metrics from a Scheduler.", func(ctx context.Context) { ginkgo.By("Proxying to Pod through the API server") - response, err := grabber.GrabFromScheduler() + response, err := grabber.GrabFromScheduler(ctx) if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { e2eskipper.Skipf("%v", err) } @@ -86,7 +86,7 @@ var _ = 
instrumentation.SIGDescribe("MetricsGrabber", func() { ginkgo.It("should grab all metrics from a ControllerManager.", func(ctx context.Context) { ginkgo.By("Proxying to Pod through the API server") - response, err := grabber.GrabFromControllerManager() + response, err := grabber.GrabFromControllerManager(ctx) if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { e2eskipper.Skipf("%v", err) } diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go index 1125a795c89..b2cc0ce7505 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver.go +++ b/test/e2e/instrumentation/monitoring/stackdriver.go @@ -69,15 +69,14 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func(ctx context.Context) { - testStackdriverMonitoring(f, 1, 100, 200) + testStackdriverMonitoring(ctx, f, 1, 100, 200) }) }) -func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, perPodCPU int64) { +func testStackdriverMonitoring(ctx context.Context, f *framework.Framework, pods, allPodsCPU int, perPodCPU int64) { projectID := framework.TestContext.CloudConfig.ProjectID - ctx := context.Background() client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) framework.ExpectNoError(err) @@ -105,10 +104,10 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per framework.ExpectNoError(err) - rc := e2eautoscaling.NewDynamicResourceConsumer(rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) + rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) ginkgo.DeferCleanup(rc.CleanUp) - rc.WaitForReplicas(pods, 15*time.Minute) + rc.WaitForReplicas(ctx, pods, 15*time.Minute) metricsMap := map[string]bool{} pollingFunction := checkForMetrics(projectID, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU) diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go index 8544748e002..b74594e4e35 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go +++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go @@ -56,11 +56,11 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { ginkgo.It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func(ctx context.Context) { kubeClient = f.ClientSet - testAgent(f, kubeClient) + testAgent(ctx, f, kubeClient) }) }) -func testAgent(f *framework.Framework, kubeClient clientset.Interface) { +func testAgent(ctx context.Context, f *framework.Framework, kubeClient clientset.Interface) { projectID := framework.TestContext.CloudConfig.ProjectID resourceType := "k8s_container" uniqueContainerName := fmt.Sprintf("test-container-%v", time.Now().Unix()) @@ -70,13 +70,13 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) { resourceType, uniqueContainerName) - oauthClient, err := google.DefaultClient(context.Background(), MonitoringScope) + oauthClient, err := google.DefaultClient(ctx, MonitoringScope) if err != nil { 
framework.Failf("Failed to create oauth client: %s", err) } // Create test pod with unique name. - _ = e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) { + _ = e2epod.CreateExecPodOrFail(ctx, kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) { pod.Spec.Containers[0].Name = uniqueContainerName }) ginkgo.DeferCleanup(kubeClient.CoreV1().Pods(f.Namespace.Name).Delete, uniqueContainerName, metav1.DeleteOptions{}) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 032e972afbc..3006df523c2 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -266,8 +266,8 @@ var _ = SIGDescribe("Kubectl client", func() { ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/}, }) } - forEachPod := func(podFunc func(p v1.Pod)) { - clusterState().ForEach(podFunc) + forEachPod := func(ctx context.Context, podFunc func(p v1.Pod)) { + _ = clusterState().ForEach(ctx, podFunc) } var c clientset.Interface var ns string @@ -280,11 +280,11 @@ var _ = SIGDescribe("Kubectl client", func() { // idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large // test files. // Print debug info if atLeast Pods are not found before the timeout - waitForOrFailWithDebug := func(atLeast int) { - pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout) + waitForOrFailWithDebug := func(ctx context.Context, atLeast int) { + pods, err := clusterState().WaitFor(ctx, atLeast, framework.PodStartTimeout) if err != nil || len(pods) < atLeast { // TODO: Generalize integrating debug info into these tests so we always get debug info when we need it - e2edebug.DumpAllNamespaceInfo(f.ClientSet, ns) + e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, ns) framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err) } } @@ -341,7 +341,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("creating a replication controller") e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-") - validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) /* @@ -354,15 +354,15 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("creating a replication controller") e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-") - validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) ginkgo.By("scaling down the replication controller") debugDiscovery() e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m") - validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + validateController(ctx, c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) ginkgo.By("scaling up the replication controller") debugDiscovery() e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m") - validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) }) @@ -402,17 +402,17 @@ var _ = SIGDescribe("Kubectl client", 
func() { }) ginkgo.By("validating guestbook app") - validateGuestbookApp(c, ns) + validateGuestbookApp(ctx, c, ns) }) }) ginkgo.Describe("Simple pod", func() { var podYaml string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml)) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") - framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true) + framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{simplePodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, simplePodSelector) @@ -579,7 +579,7 @@ var _ = SIGDescribe("Kubectl client", func() { if !strings.Contains(ee.String(), "timed out") { framework.Failf("Missing expected 'timed out' error, got: %#v", ee) } - e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) + framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)) }) ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) { @@ -613,7 +613,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) - gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test", metav1.DeleteOptions{})).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test", metav1.DeleteOptions{})).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach without stdin") // There is a race on this scenario described in #73099 @@ -629,7 +629,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) - gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running") e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). 
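The Delete calls above show the core mechanical change in this patch: helpers stop manufacturing their own context with context.TODO() or context.Background() and instead use the ctx that Ginkgo passes into the spec body, so an aborted test cancels any in-flight call. A minimal stand-alone sketch of that refactor, using net/http as a stand-in for the Kubernetes client (fetchStatus and the URL are invented for illustration):

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

// Before: the helper owned its context, so callers could not cancel it.
//
//	func fetchStatus(url string) (string, error) {
//		req, _ := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil)
//		...
//	}

// After: the caller's ctx (in the e2e tests, the one Ginkgo hands to the spec)
// flows into every request, so aborting the test aborts the call as well.
func fetchStatus(ctx context.Context, url string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	// Stand-in for the per-spec context; cancelled when the "test" is aborted.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if out, err := fetchStatus(ctx, "https://example.com/healthz"); err != nil {
		fmt.Println("request failed:", err)
	} else {
		fmt.Println(out)
	}
}

The only difference from the pre-patch shape is the extra ctx parameter; everything downstream of it becomes cancellable without further changes.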
@@ -643,11 +643,11 @@ var _ = SIGDescribe("Kubectl client", func() { g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) framework.ExpectNoError(err) - if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { + if !e2epod.CheckPodsRunningReady(ctx, c, ns, []string{runTestPod.Name}, time.Minute) { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } - gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil()) }) ginkgo.It("should contain last line of the log", func(ctx context.Context) { @@ -656,7 +656,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("executing a command with run") e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF") - if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) { + if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) { framework.Failf("Pod for run-log-test was not ready") } @@ -693,11 +693,11 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("adding rbac permissions") // grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace - err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name, + err := e2eauth.BindClusterRole(ctx, f.ClientSet.RbacV1(), "view", f.Namespace.Name, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) framework.ExpectNoError(err) - err = e2eauth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(ctx, f.ClientSet.AuthorizationV1(), serviceaccount.MakeUsername(f.Namespace.Name, "default"), f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) @@ -855,7 +855,7 @@ metadata: WithStdinReader(stdin). 
ExecOrDie(ns) ginkgo.By("checking the result") - forEachReplicationController(c, ns, "app", "agnhost", validateReplicationControllerConfiguration) + forEachReplicationController(ctx, c, ns, "app", "agnhost", validateReplicationControllerConfiguration) }) ginkgo.It("should reuse port when apply to an existing SVC", func(ctx context.Context) { serviceJSON := readTestFileOrDie(agnhostServiceFilename) @@ -969,7 +969,7 @@ metadata: e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server") ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage) - pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) } @@ -1281,10 +1281,10 @@ metadata: e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-") ginkgo.By("Waiting for Agnhost primary to start.") - waitForOrFailWithDebug(1) + waitForOrFailWithDebug(ctx, 1) // Pod - forEachPod(func(pod v1.Pod) { + forEachPod(ctx, func(pod v1.Pod) { output := e2ekubectl.RunKubectlOrDie(ns, "describe", "pod", pod.Name) requiredStrings := [][]string{ {"Name:", "agnhost-primary-"}, @@ -1336,7 +1336,7 @@ metadata: // Node // It should be OK to list unschedulable Nodes here. - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) node := nodes.Items[0] output = e2ekubectl.RunKubectlOrDie(ns, "describe", "node", node.Name) @@ -1377,7 +1377,7 @@ metadata: ginkgo.By("waiting for cronjob to start.") err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - cj, err := c.BatchV1().CronJobs(ns).List(context.TODO(), metav1.ListOptions{}) + cj, err := c.BatchV1().CronJobs(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed getting CronJob %s: %v", ns, err) } @@ -1424,14 +1424,14 @@ metadata: // It may take a while for the pods to get registered in some cases, wait to be sure. 
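waitForOrFailWithDebug(ctx, ...) and forEachPod(ctx, ...) above show the same treatment for test-local closures: they take ctx as an explicit first argument instead of capturing a background context, and iteration stops once the context is done. A rough stdlib-only sketch of that shape (forEachItem and the item names are invented for illustration):

package main

import (
	"context"
	"fmt"
	"time"
)

// forEachItem calls fn for every item but stops early once ctx is cancelled,
// mirroring how forEachPod(ctx, ...) stops iterating when the test is aborted.
func forEachItem(ctx context.Context, items []string, fn func(item string)) error {
	for _, item := range items {
		if err := ctx.Err(); err != nil {
			return err // test aborted; report instead of looping on
		}
		fn(item)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	err := forEachItem(ctx, []string{"pod-a", "pod-b", "pod-c"}, func(item string) {
		fmt.Println("inspecting", item)
		time.Sleep(30 * time.Millisecond) // stand-in for a per-pod API call
	})
	if err != nil {
		fmt.Println("stopped early:", err)
	}
}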
ginkgo.By("Waiting for Agnhost primary to start.") - waitForOrFailWithDebug(1) - forEachPod(func(pod v1.Pod) { + waitForOrFailWithDebug(ctx, 1) + forEachPod(ctx, func(pod v1.Pod) { framework.Logf("wait on agnhost-primary startup in %v ", ns) e2eoutput.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout) }) validateService := func(name string, servicePort int, timeout time.Duration) { err := wait.Poll(framework.Poll, timeout, func() (bool, error) { - ep, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{}) + ep, err := c.CoreV1().Endpoints(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { // log the real error framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err) @@ -1462,7 +1462,7 @@ metadata: }) framework.ExpectNoError(err) - e2eservice, err := c.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) + e2eservice, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err) if len(e2eservice.Spec.Ports) != 1 { @@ -1479,23 +1479,23 @@ metadata: ginkgo.By("exposing RC") e2ekubectl.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort)) - e2enetwork.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout) + framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)) validateService("rm2", 1234, framework.ServiceStartTimeout) ginkgo.By("exposing service") e2ekubectl.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort)) - e2enetwork.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout) + framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)) validateService("rm3", 2345, framework.ServiceStartTimeout) }) }) ginkgo.Describe("Kubectl label", func() { var podYaml string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("creating the pod") podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") - framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true) + framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{pausePodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, pausePodSelector) @@ -1530,11 +1530,11 @@ metadata: ginkgo.Describe("Kubectl copy", func() { var podYaml string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("creating the pod") podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in"))) e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") - framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true) + framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, busyboxPodSelector) @@ -1597,7 +1597,7 @@ metadata: } ginkgo.By("Waiting for log generator to start.") - if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) { + if 
!e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) { framework.Failf("Pod %s was not ready", podName) } @@ -1654,14 +1654,14 @@ metadata: ginkgo.By("creating Agnhost RC") e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") ginkgo.By("Waiting for Agnhost primary to start.") - waitForOrFailWithDebug(1) + waitForOrFailWithDebug(ctx, 1) ginkgo.By("patching all pods") - forEachPod(func(pod v1.Pod) { + forEachPod(ctx, func(pod v1.Pod) { e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") }) ginkgo.By("checking annotations") - forEachPod(func(pod v1.Pod) { + forEachPod(ctx, func(pod v1.Pod) { found := false for key, val := range pod.Annotations { if key == "x" && val == "y" { @@ -1714,7 +1714,7 @@ metadata: ginkgo.By("running the image " + httpdImage) e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage) ginkgo.By("verifying the pod " + podName + " was created") - pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) } @@ -1766,7 +1766,7 @@ metadata: e2ekubectl.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-") ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) - pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting deployment %s: %v", podName, err) } @@ -1849,7 +1849,7 @@ metadata: Effect: v1.TaintEffectNoSchedule, } - nodeName := scheduling.GetNodeThatCanRunPod(f) + nodeName := scheduling.GetNodeThatCanRunPod(ctx, f) ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString()) @@ -1880,7 +1880,7 @@ metadata: Effect: v1.TaintEffectNoSchedule, } - nodeName := scheduling.GetNodeThatCanRunPod(f) + nodeName := scheduling.GetNodeThatCanRunPod(ctx, f) ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString()) @@ -1980,7 +1980,7 @@ metadata: e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000") ginkgo.By("verifying that the quota was created") - quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) + quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) } @@ -2008,7 +2008,7 @@ metadata: e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating") ginkgo.By("verifying that the quota was created") - quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) + quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) } @@ -2163,31 +2163,31 @@ func curl(url string) (string, error) { return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{})) } -func validateGuestbookApp(c clientset.Interface, ns string) { +func validateGuestbookApp(ctx context.Context, c clientset.Interface, ns string) { 
framework.Logf("Waiting for all frontend pods to be Running.") label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"})) err := testutils.WaitForPodsWithLabelRunning(c, ns, label) framework.ExpectNoError(err) framework.Logf("Waiting for frontend to serve content.") - if !waitForGuestbookResponse(c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) { + if !waitForGuestbookResponse(ctx, c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) { framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds()) } framework.Logf("Trying to add a new entry to the guestbook.") - if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) { + if !waitForGuestbookResponse(ctx, c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) { framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds()) } framework.Logf("Verifying that added entry can be retrieved.") - if !waitForGuestbookResponse(c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) { + if !waitForGuestbookResponse(ctx, c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) { framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds()) } } // Returns whether received expected response from guestbook on time. -func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { - res, err := makeRequestToGuestbook(c, cmd, arg, ns) +func waitForGuestbookResponse(ctx context.Context, c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool { + for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(5 * time.Second) { + res, err := makeRequestToGuestbook(ctx, c, cmd, arg, ns) if err == nil && res == expectedResponse { return true } @@ -2196,13 +2196,13 @@ func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse return false } -func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) { +func makeRequestToGuestbook(ctx context.Context, c clientset.Interface, cmd, value string, ns string) (string, error) { proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) if errProxy != nil { return "", errProxy } - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() result, err := proxyRequest.Namespace(ns). 
@@ -2244,13 +2244,13 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader { return bytes.NewReader(data) } -func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) { +func forEachReplicationController(ctx context.Context, c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) { var rcs *v1.ReplicationControllerList var err error - for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) { + for t := time.Now(); time.Since(t) < framework.PodListTimeout && ctx.Err() == nil; time.Sleep(framework.Poll) { label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) options := metav1.ListOptions{LabelSelector: label.String()} - rcs, err = c.CoreV1().ReplicationControllers(ns).List(context.TODO(), options) + rcs, err = c.CoreV1().ReplicationControllers(ns).List(ctx, options) framework.ExpectNoError(err) if len(rcs.Items) > 0 { break @@ -2281,13 +2281,13 @@ func validateReplicationControllerConfiguration(rc v1.ReplicationController) { // getUDData creates a validator function based on the input string (i.e. kitten.jpg). // For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg // in the container's json field. -func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error { +func getUDData(jpgExpected string, ns string) func(context.Context, clientset.Interface, string) error { // getUDData validates data.json in the update-demo (returns nil if data is ok). - return func(c clientset.Interface, podID string) error { + return func(ctx context.Context, c clientset.Interface, podID string) error { framework.Logf("validating pod %s", podID) - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() body, err := c.CoreV1().RESTClient().Get(). @@ -2296,7 +2296,7 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) SubResource("proxy"). Name(podID). Suffix("data.json"). - Do(context.TODO()). + Do(ctx). Raw() if err != nil { @@ -2373,7 +2373,7 @@ func trimDockerRegistry(imagename string) string { // validatorFn is the function which is individual tests will implement. // we may want it to return more than just an error, at some point. -type validatorFn func(c clientset.Interface, podID string) error +type validatorFn func(ctx context.Context, c clientset.Interface, podID string) error // validateController is a generic mechanism for testing RC's that are running. // It takes a container name, a test name, and a validator function which is plugged in by a specific test. @@ -2381,7 +2381,7 @@ type validatorFn func(c clientset.Interface, podID string) error // "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated. // "testname": which gets bubbled up to the logging/failure messages if errors happen. // "validator" function: This function is given a podID and a client, and it can do some specific validations that way. 
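The loops in waitForGuestbookResponse and forEachReplicationController above add `ctx.Err() == nil` to the loop condition, and getUDData derives its per-call deadline with context.WithTimeout(ctx, ...) rather than from context.Background(). A compressed, stdlib-only sketch that combines both ideas (pollUntil, the 2-second attempt deadline, and the probe are invented for illustration):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntil retries probe until it succeeds, the overall timeout elapses,
// or ctx is cancelled (for example because the test was aborted).
func pollUntil(ctx context.Context, timeout, interval time.Duration, probe func(ctx context.Context) error) error {
	var lastErr error
	for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(interval) {
		// Each attempt gets its own deadline, derived from the caller's context.
		attemptCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
		lastErr = probe(attemptCtx)
		cancel()
		if lastErr == nil {
			return nil
		}
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	return fmt.Errorf("condition not met within %v: %w", timeout, lastErr)
}

func main() {
	ctx := context.Background()
	calls := 0
	err := pollUntil(ctx, 10*time.Second, 500*time.Millisecond, func(ctx context.Context) error {
		calls++
		if calls < 3 {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Println("result:", err, "after", calls, "attempts")
}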
-func validateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
+func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
 	containerImage = trimDockerRegistry(containerImage)
 	getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
@@ -2391,7 +2391,7 @@ func validateController(c clientset.Interface, containerImage string, replicas i
 	ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
 waitLoop:
-	for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) {
+	for start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
 		getPodsOutput := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
 		pods := strings.Fields(getPodsOutput)
 		if numPods := len(pods); numPods != replicas {
@@ -2415,7 +2415,7 @@ waitLoop:
 		// Call the generic validator function here.
 		// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
-		if err := validator(c, podID); err != nil {
+		if err := validator(ctx, c, podID); err != nil {
 			framework.Logf("%s is running right image but validator function failed: %v", podID, err)
 			continue waitLoop
 		}
diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go
index 7947f5048f0..c31d0ce5f5a 100644
--- a/test/e2e/kubectl/portforward.go
+++ b/test/e2e/kubectl/portforward.go
@@ -124,8 +124,8 @@ func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string, bi
 }
 // WaitForTerminatedContainer waits till a given container be terminated for a given pod.
-func WaitForTerminatedContainer(f *framework.Framework, pod *v1.Pod, containerName string) error { - return e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { +func WaitForTerminatedContainer(ctx context.Context, f *framework.Framework, pod *v1.Pod, containerName string) error { + return e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { if len(testutils.TerminatedContainers(pod)[containerName]) > 0 { return true, nil } @@ -207,13 +207,13 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { } } -func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { +func doTestConnectSendDisconnect(ctx context.Context, bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil { framework.Failf("Couldn't create pod: %v", err) } - if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil { + if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil { framework.Failf("Pod did not start running: %v", err) } @@ -242,26 +242,26 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { } ginkgo.By("Waiting for the target pod to stop running") - if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { + if err := WaitForTerminatedContainer(ctx, f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } ginkgo.By("Verifying logs") - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( gomega.ContainSubstring("Accepted client connection"), gomega.ContainSubstring("Done"), )) } -func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { +func doTestMustConnectSendNothing(ctx context.Context, bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil { framework.Failf("Couldn't create pod: %v", err) } - if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil { + if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil { framework.Failf("Pod did not start running: %v", err) } @@ -279,26 +279,26 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { conn.Close() ginkgo.By("Waiting for the target pod to 
stop running") - if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { + if err := WaitForTerminatedContainer(ctx, f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } ginkgo.By("Verifying logs") - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( gomega.ContainSubstring("Accepted client connection"), gomega.ContainSubstring("Expected to read 3 bytes from client, but got 0 instead"), )) } -func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) { +func doTestMustConnectSendDisconnect(ctx context.Context, bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil { framework.Failf("Couldn't create pod: %v", err) } - if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil { + if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil { framework.Failf("Pod did not start running: %v", err) } @@ -330,7 +330,7 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) } if e, a := strings.Repeat("x", 100), string(fromServer); e != a { - podlogs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + podlogs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { framework.Logf("Failed to get logs of portforwardtester pod: %v", err) } else { @@ -345,13 +345,13 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) } ginkgo.By("Waiting for the target pod to stop running") - if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { + if err := WaitForTerminatedContainer(ctx, f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } ginkgo.By("Verifying logs") - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( gomega.ContainSubstring("Accepted client connection"), gomega.ContainSubstring("Received expected client data"), @@ -359,16 +359,16 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) )) } -func doTestOverWebSockets(bindAddress string, f *framework.Framework) { +func doTestOverWebSockets(ctx context.Context, bindAddress string, f *framework.Framework) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "unable to get base config") ginkgo.By("Creating the pod") pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := 
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil { framework.Failf("Couldn't create pod: %v", err) } - if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil { + if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil { framework.Failf("Pod did not start running: %v", err) } @@ -386,7 +386,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { } defer ws.Close() - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err) @@ -400,7 +400,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { return nil }, time.Minute, 10*time.Second).Should(gomega.Succeed()) - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err) @@ -423,7 +423,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { ginkgo.By("Reading data from the local port") buf := bytes.Buffer{} expectedData := bytes.Repeat([]byte("x"), 100) - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err) @@ -439,8 +439,8 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { }, time.Minute, 10*time.Second).Should(gomega.Succeed()) ginkgo.By("Verifying logs") - gomega.Eventually(func() (string, error) { - return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + gomega.Eventually(ctx, func() (string, error) { + return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( gomega.ContainSubstring("Accepted client connection"), gomega.ContainSubstring("Received expected client data"), @@ -454,21 +454,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() { ginkgo.Describe("With a server listening on 0.0.0.0", func() { ginkgo.Describe("that expects a client request", func() { ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func(ctx context.Context) { - doTestMustConnectSendNothing("0.0.0.0", f) + doTestMustConnectSendNothing(ctx, "0.0.0.0", f) }) ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) { - doTestMustConnectSendDisconnect("0.0.0.0", f) + doTestMustConnectSendDisconnect(ctx, "0.0.0.0", f) }) }) ginkgo.Describe("that expects NO client request", func() { ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) { - doTestConnectSendDisconnect("0.0.0.0", f) + doTestConnectSendDisconnect(ctx, "0.0.0.0", f) }) }) ginkgo.It("should support forwarding over websockets", func(ctx context.Context) { - doTestOverWebSockets("0.0.0.0", f) + doTestOverWebSockets(ctx, "0.0.0.0", f) }) }) @@ -476,21 +476,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() { ginkgo.Describe("With a server listening on localhost", func() { 
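The log checks in these port-forward helpers show the Gomega side of the migration: the context is passed as the first argument to gomega.Eventually, so the assertion stops retrying as soon as the test is aborted instead of running out its own timeout. A minimal test-shaped sketch of that call pattern (assumes a Gomega version that accepts a leading context, as used above; the polled function is a stand-in for GetPodLogs):

package example

import (
	"context"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallyWithContext(t *testing.T) {
	g := gomega.NewWithT(t)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	start := time.Now()
	// The polled function is retried until it matches, the timeout expires,
	// or ctx is cancelled, whichever comes first.
	g.Eventually(ctx, func() (string, error) {
		if time.Since(start) > time.Second {
			return "Accepted client connection\nDone", nil
		}
		return "", nil
	}, 30*time.Second, 250*time.Millisecond).Should(gomega.ContainSubstring("Done"))
}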
ginkgo.Describe("that expects a client request", func() { ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func(ctx context.Context) { - doTestMustConnectSendNothing("localhost", f) + doTestMustConnectSendNothing(ctx, "localhost", f) }) ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) { - doTestMustConnectSendDisconnect("localhost", f) + doTestMustConnectSendDisconnect(ctx, "localhost", f) }) }) ginkgo.Describe("that expects NO client request", func() { ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) { - doTestConnectSendDisconnect("localhost", f) + doTestConnectSendDisconnect(ctx, "localhost", f) }) }) ginkgo.It("should support forwarding over websockets", func(ctx context.Context) { - doTestOverWebSockets("localhost", f) + doTestOverWebSockets(ctx, "localhost", f) }) }) }) diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go index 20ba887d3ca..86e6bc84d05 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go @@ -43,10 +43,10 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { f := framework.NewDefaultFramework("bootstrap-signer") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if len(secretNeedClean) > 0 { ginkgo.By("delete the bootstrap token secret") - err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretNeedClean, metav1.DeleteOptions{}) + err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(ctx, secretNeedClean, metav1.DeleteOptions{}) framework.ExpectNoError(err) secretNeedClean = "" } @@ -60,7 +60,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(ctx, secret, metav1.CreateOptions{}) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID framework.ExpectNoError(err) @@ -75,7 +75,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID @@ -83,7 +83,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { err = WaitforSignedClusterInfoByBootStrapToken(c, tokenID) framework.ExpectNoError(err) - cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(ctx, bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) framework.ExpectNoError(err) signedToken, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] if !ok { @@ -95,14 +95,14 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { updatedKubeConfig, err := randBytes(20) 
framework.ExpectNoError(err) cfgMap.Data[bootstrapapi.KubeConfigKey] = updatedKubeConfig - _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(context.TODO(), cfgMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(ctx, cfgMap, metav1.UpdateOptions{}) framework.ExpectNoError(err) defer func() { ginkgo.By("update back the cluster-info ConfigMap") - cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(ctx, bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) framework.ExpectNoError(err) cfgMap.Data[bootstrapapi.KubeConfigKey] = originalData - _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(context.TODO(), cfgMap, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(ctx, cfgMap, metav1.UpdateOptions{}) framework.ExpectNoError(err) }() @@ -116,7 +116,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("wait for the bootstrap secret be signed") @@ -124,7 +124,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) ginkgo.By("delete the bootstrap token secret") - err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.DeleteOptions{}) + err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(ctx, bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("wait for the bootstrap token removed from cluster-info ConfigMap") diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go index 01aeff34eea..a397af3c070 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go @@ -42,10 +42,10 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { c = f.ClientSet }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if len(secretNeedClean) > 0 { ginkgo.By("delete the bootstrap token secret") - err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretNeedClean, metav1.DeleteOptions{}) + err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(ctx, secretNeedClean, metav1.DeleteOptions{}) secretNeedClean = "" framework.ExpectNoError(err) } @@ -59,7 +59,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { secret := newTokenSecret(tokenID, tokenSecret) addSecretExpiration(secret, TimeStringFromNow(-time.Hour)) - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -76,7 +76,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) secret := newTokenSecret(tokenID, tokenSecret) addSecretExpiration(secret, 
TimeStringFromNow(time.Hour)) - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(ctx, secret, metav1.CreateOptions{}) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID framework.ExpectNoError(err) diff --git a/test/e2e/network/conntrack.go b/test/e2e/network/conntrack.go index 3d22fb714a6..b1961253057 100644 --- a/test/e2e/network/conntrack.go +++ b/test/e2e/network/conntrack.go @@ -81,9 +81,9 @@ var _ = common.SIGDescribe("Conntrack", func() { clientNodeInfo, serverNodeInfo nodeInfo ) - logContainsFn := func(text, podName string) wait.ConditionFunc { - return func() (bool, error) { - logs, err := e2epod.GetPodLogs(cs, ns, podName, podName) + logContainsFn := func(text, podName string) wait.ConditionWithContextFunc { + return func(ctx context.Context) (bool, error) { + logs, err := e2epod.GetPodLogs(ctx, cs, ns, podName, podName) if err != nil { // Retry the error next time. return false, nil @@ -95,11 +95,11 @@ var _ = common.SIGDescribe("Conntrack", func() { } } - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = fr.ClientSet ns = fr.Namespace.Name - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) if len(nodes.Items) < 2 { e2eskipper.Skipf( @@ -134,7 +134,7 @@ var _ = common.SIGDescribe("Conntrack", func() { // Create a NodePort service udpJig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating a UDP service " + serviceName + " with type=NodePort in " + ns) - udpService, err := udpJig.CreateUDPService(func(svc *v1.Service) { + udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(80)}, @@ -150,10 +150,10 @@ var _ = common.SIGDescribe("Conntrack", func() { cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort) clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd} clientPod.Spec.Containers[0].Name = podClient - e2epod.NewPodClient(fr).CreateSync(clientPod) + e2epod.NewPodClient(fr).CreateSync(ctx, clientPod) // Read the client pod logs - logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient) + logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) @@ -163,9 +163,9 @@ var _ = common.SIGDescribe("Conntrack", func() { serverPod1.Labels = udpJig.Labels nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection) - e2epod.NewPodClient(fr).CreateSync(serverPod1) + e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}}) // Note that the fact that Endpoints object already exists, does NOT mean // that iptables (or whatever else is used) was already programmed. @@ -173,8 +173,8 @@ var _ = common.SIGDescribe("Conntrack", func() { // 30 seconds by default. // Based on the above check if the pod receives the traffic. 
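logContainsFn above now returns a wait.ConditionWithContextFunc, and the call sites in the hunks that follow switch from wait.PollImmediate to wait.PollImmediateWithContext, so the polling itself becomes interruptible. A small self-contained sketch using the same k8s.io/apimachinery/pkg/util/wait helpers (logContains and fetchLogs are stand-ins for the test's log fetching):

package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// logContains returns a wait.ConditionWithContextFunc, mirroring logContainsFn
// above: the returned condition receives ctx and can pass it on to API calls.
func logContains(text string, fetchLogs func(ctx context.Context) (string, error)) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		logs, err := fetchLogs(ctx)
		if err != nil {
			return false, nil // retry on transient errors, as the test does
		}
		return strings.Contains(logs, text), nil
	}
}

func main() {
	ctx := context.Background()
	attempts := 0
	fetchLogs := func(ctx context.Context) (string, error) {
		attempts++
		if attempts < 3 {
			return "starting up", nil
		}
		return "pod-backend-1 connected", nil
	}

	err := wait.PollImmediateWithContext(ctx, 500*time.Millisecond, time.Minute,
		logContains("pod-backend-1", fetchLogs))
	fmt.Println("poll result:", err, "after", attempts, "attempts")
}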
ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP) - if err := wait.PollImmediate(5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil { - logs, err = e2epod.GetPodLogs(cs, ns, podClient, podClient) + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil { + logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) framework.Failf("Failed to connect to backend 1") @@ -186,19 +186,19 @@ var _ = common.SIGDescribe("Conntrack", func() { serverPod2.Labels = udpJig.Labels nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection) - e2epod.NewPodClient(fr).CreateSync(serverPod2) + e2epod.NewPodClient(fr).CreateSync(ctx, serverPod2) // and delete the first pod framework.Logf("Cleaning up %s pod", podBackend1) - e2epod.NewPodClient(fr).DeleteSync(podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend2: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}}) // Check that the second pod keeps receiving traffic // UDP conntrack entries timeout is 30 sec by default ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP) - if err := wait.PollImmediate(5*time.Second, time.Minute, logContainsFn(podBackend2, podClient)); err != nil { - logs, err = e2epod.GetPodLogs(cs, ns, podClient, podClient) + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend2, podClient)); err != nil { + logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) framework.Failf("Failed to connect to backend 2") @@ -210,7 +210,7 @@ var _ = common.SIGDescribe("Conntrack", func() { // Create a ClusterIP service udpJig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in " + ns) - udpService, err := udpJig.CreateUDPService(func(svc *v1.Service) { + udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeClusterIP svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(80)}, @@ -226,10 +226,10 @@ var _ = common.SIGDescribe("Conntrack", func() { cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port) clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd} clientPod.Spec.Containers[0].Name = podClient - e2epod.NewPodClient(fr).CreateSync(clientPod) + e2epod.NewPodClient(fr).CreateSync(ctx, clientPod) // Read the client pod logs - logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient) + logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) @@ -239,9 +239,9 @@ var _ = common.SIGDescribe("Conntrack", func() { serverPod1.Labels = udpJig.Labels nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name} 
e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection) - e2epod.NewPodClient(fr).CreateSync(serverPod1) + e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}}) // Note that the fact that Endpoints object already exists, does NOT mean // that iptables (or whatever else is used) was already programmed. @@ -249,8 +249,8 @@ var _ = common.SIGDescribe("Conntrack", func() { // 30 seconds by default. // Based on the above check if the pod receives the traffic. ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP) - if err := wait.PollImmediate(5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil { - logs, err = e2epod.GetPodLogs(cs, ns, podClient, podClient) + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil { + logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) framework.Failf("Failed to connect to backend 1") @@ -262,19 +262,19 @@ var _ = common.SIGDescribe("Conntrack", func() { serverPod2.Labels = udpJig.Labels nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection) - e2epod.NewPodClient(fr).CreateSync(serverPod2) + e2epod.NewPodClient(fr).CreateSync(ctx, serverPod2) // and delete the first pod framework.Logf("Cleaning up %s pod", podBackend1) - e2epod.NewPodClient(fr).DeleteSync(podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend2: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}}) // Check that the second pod keeps receiving traffic // UDP conntrack entries timeout is 30 sec by default ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP) - if err := wait.PollImmediate(5*time.Second, time.Minute, logContainsFn(podBackend2, podClient)); err != nil { - logs, err = e2epod.GetPodLogs(cs, ns, podClient, podClient) + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend2, podClient)); err != nil { + logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) framework.Failf("Failed to connect to backend 2") @@ -297,7 +297,7 @@ var _ = common.SIGDescribe("Conntrack", func() { // Create a ClusterIP service udpJig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in " + ns) - udpService, err := udpJig.CreateUDPService(func(svc *v1.Service) { + udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeClusterIP svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(80)}, @@ -313,10 +313,10 @@ var _ = common.SIGDescribe("Conntrack", func() { cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port) 
clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd} clientPod.Spec.Containers[0].Name = podClient - e2epod.NewPodClient(fr).CreateSync(clientPod) + e2epod.NewPodClient(fr).CreateSync(ctx, clientPod) // Read the client pod logs - logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient) + logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) @@ -334,10 +334,10 @@ var _ = common.SIGDescribe("Conntrack", func() { }, } e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection) - e2epod.NewPodClient(fr).CreateSync(serverPod1) + e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1) // wait until the endpoints are ready - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}}) // Note that the fact that Endpoints object already exists, does NOT mean // that iptables (or whatever else is used) was already programmed. @@ -345,8 +345,8 @@ var _ = common.SIGDescribe("Conntrack", func() { // 30 seconds by default. // Based on the above check if the pod receives the traffic. ginkgo.By("checking client pod connected to the backend on Node IP " + serverNodeInfo.nodeIP) - if err := wait.PollImmediate(5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil { - logs, err = e2epod.GetPodLogs(cs, ns, podClient, podClient) + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil { + logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) framework.Failf("Failed to connect to backend pod") @@ -411,7 +411,7 @@ var _ = common.SIGDescribe("Conntrack", func() { } nodeSelection := e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod.Spec, nodeSelection) - e2epod.NewPodClient(fr).CreateSync(serverPod) + e2epod.NewPodClient(fr).CreateSync(ctx, serverPod) ginkgo.By("Server pod created on node " + serverNodeInfo.name) svc := &v1.Service{ @@ -428,7 +428,7 @@ var _ = common.SIGDescribe("Conntrack", func() { }, }, } - _, err := fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Server service created") @@ -453,7 +453,7 @@ var _ = common.SIGDescribe("Conntrack", func() { nodeSelection = e2epod.NodeSelection{Name: clientNodeInfo.name} e2epod.SetNodeSelection(&pod.Spec, nodeSelection) - e2epod.NewPodClient(fr).CreateSync(pod) + e2epod.NewPodClient(fr).CreateSync(ctx, pod) ginkgo.By("Client pod created") // The client will open connections against the server @@ -462,14 +462,14 @@ var _ = common.SIGDescribe("Conntrack", func() { // so the client will receive an unexpected TCP connection and RST the connection // the server will log ERROR if that happens ginkgo.By("checking client pod does not RST the TCP connection because it receives an INVALID packet") - if err := wait.PollImmediate(5*time.Second, time.Minute, logContainsFn("ERROR", "boom-server")); err == nil { - logs, err := e2epod.GetPodLogs(cs, ns, "boom-server", "boom-server") + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn("ERROR", "boom-server")); err == nil { + logs, err := e2epod.GetPodLogs(ctx, cs, ns, "boom-server", 
"boom-server") framework.ExpectNoError(err) framework.Logf("boom-server pod logs: %s", logs) framework.Failf("boom-server pod received a RST from the client") } - logs, err := e2epod.GetPodLogs(cs, ns, "boom-server", "boom-server") + logs, err := e2epod.GetPodLogs(ctx, cs, ns, "boom-server", "boom-server") framework.ExpectNoError(err) if !strings.Contains(string(logs), "connection established") { framework.Logf("boom-server pod logs: %s", logs) diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index 2b76a077469..d9ebec996dd 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -63,7 +63,7 @@ var _ = common.SIGDescribe("DNS", func() { // Run a pod which probes DNS and exposes the results by HTTP. ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) - validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) }) // Added due to #8512. This is critical for GCE and GKE deployments. @@ -85,7 +85,7 @@ var _ = common.SIGDescribe("DNS", func() { // Run a pod which probes DNS and exposes the results by HTTP. ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) - validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) }) // [LinuxOnly]: As Windows currently does not support resolving PQDNs. @@ -106,7 +106,7 @@ var _ = common.SIGDescribe("DNS", func() { // Run a pod which probes DNS and exposes the results by HTTP. ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) - validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) }) /* @@ -126,7 +126,7 @@ var _ = common.SIGDescribe("DNS", func() { // Run a pod which probes /etc/hosts and exposes the results by HTTP. 
ginkgo.By("creating a pod to probe /etc/hosts") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) - validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) }) /* @@ -142,7 +142,7 @@ var _ = common.SIGDescribe("DNS", func() { "dns-test": "true", } headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.By("deleting the test headless service") @@ -151,7 +151,7 @@ var _ = common.SIGDescribe("DNS", func() { regularServiceName := "test-service-2" regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) - regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{}) + regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, regularService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) ginkgo.DeferCleanup(func(ctx context.Context) error { @@ -179,7 +179,7 @@ var _ = common.SIGDescribe("DNS", func() { pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod.ObjectMeta.Labels = testServiceSelector - validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) }) /* @@ -195,7 +195,7 @@ var _ = common.SIGDescribe("DNS", func() { "dns-test": "true", } headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.By("deleting the test headless service") @@ -204,7 +204,7 @@ var _ = common.SIGDescribe("DNS", func() { regularServiceName := "test-service-2" regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) - regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{}) + regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, regularService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.By("deleting the test service") @@ -232,7 +232,7 @@ var _ = common.SIGDescribe("DNS", func() { pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod.ObjectMeta.Labels = testServiceSelector - validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) }) /* @@ -250,7 
+250,7 @@ var _ = common.SIGDescribe("DNS", func() { serviceName := "dns-test-service-2" podHostname := "dns-querier-2" headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) ginkgo.DeferCleanup(func(ctx context.Context) error { @@ -274,7 +274,7 @@ var _ = common.SIGDescribe("DNS", func() { pod1.Spec.Hostname = podHostname pod1.Spec.Subdomain = serviceName - validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod1, append(wheezyFileNames, jessieFileNames...)) }) /* @@ -292,7 +292,7 @@ var _ = common.SIGDescribe("DNS", func() { serviceName := "dns-test-service-2" podHostname := "dns-querier-2" headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) ginkgo.DeferCleanup(func(ctx context.Context) error { @@ -317,7 +317,7 @@ var _ = common.SIGDescribe("DNS", func() { pod1.Spec.Hostname = podHostname pod1.Spec.Subdomain = serviceName - validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod1, append(wheezyFileNames, jessieFileNames...)) }) /* @@ -331,7 +331,7 @@ var _ = common.SIGDescribe("DNS", func() { ginkgo.By("Creating a test externalName service") serviceName := "dns-test-service-3" externalNameService := e2eservice.CreateServiceSpec(serviceName, "foo.example.com", false, nil) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, externalNameService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName) ginkgo.DeferCleanup(func(ctx context.Context) error { @@ -349,11 +349,11 @@ var _ = common.SIGDescribe("DNS", func() { ginkgo.By("creating a pod to probe DNS") pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) - validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") + validateTargetedProbeOutput(ctx, f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") // Test changing the externalName field ginkgo.By("changing the externalName to bar.example.com") - _, err = e2eservice.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { + _, err = e2eservice.UpdateService(ctx, f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.ExternalName = "bar.example.com" }) framework.ExpectNoError(err, "failed to change externalName of service: %s", serviceName) @@ -366,11 +366,11 @@ var _ = common.SIGDescribe("DNS", func() { ginkgo.By("creating a second pod to probe DNS") pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) - validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, 
jessieFileName}, "bar.example.com.") + validateTargetedProbeOutput(ctx, f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.") // Test changing type from ExternalName to ClusterIP ginkgo.By("changing the service to type=ClusterIP") - _, err = e2eservice.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { + _, err = e2eservice.UpdateService(ctx, f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP}, @@ -392,10 +392,10 @@ var _ = common.SIGDescribe("DNS", func() { ginkgo.By("creating a third pod to probe DNS") pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) - svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(context.TODO(), externalNameService.Name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(ctx, externalNameService.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get service: %s", externalNameService.Name) - validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP) + validateTargetedProbeOutput(ctx, f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP) }) /* @@ -414,14 +414,14 @@ var _ = common.SIGDescribe("DNS", func() { Nameservers: []string{testServerIP}, Searches: []string{testSearchPath}, } - testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testAgnhostPod, metav1.CreateOptions{}) + testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, testAgnhostPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name) framework.Logf("Created pod %v", testAgnhostPod) ginkgo.DeferCleanup(func(ctx context.Context) error { framework.Logf("Deleting pod %s...", testAgnhostPod.Name) - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, *metav1.NewDeleteOptions(0)) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, testAgnhostPod.Name, *metav1.NewDeleteOptions(0)) }) - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, testAgnhostPod.Name, f.Namespace.Name, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, testAgnhostPod.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err, "failed to wait for pod %s to be running", testAgnhostPod.Name) runCommand := func(arg string) string { @@ -461,7 +461,7 @@ var _ = common.SIGDescribe("DNS", func() { corednsConfig := generateCoreDNSConfigmap(f.Namespace.Name, map[string]string{ testDNSNameFull: testInjectedIP, }) - corednsConfig, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), corednsConfig, metav1.CreateOptions{}) + corednsConfig, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, corednsConfig, metav1.CreateOptions{}) framework.ExpectNoError(err, "unable to create test configMap %s", corednsConfig.Name) ginkgo.DeferCleanup(func(ctx context.Context) error { @@ -470,18 +470,18 @@ var _ = common.SIGDescribe("DNS", func() { }) testServerPod := generateCoreDNSServerPod(corednsConfig) - testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testServerPod, metav1.CreateOptions{}) + testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, testServerPod, 
metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %s", testServerPod.Name) framework.Logf("Created pod %v", testServerPod) ginkgo.DeferCleanup(func(ctx context.Context) error { framework.Logf("Deleting pod %s...", testServerPod.Name) - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, *metav1.NewDeleteOptions(0)) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, testServerPod.Name, *metav1.NewDeleteOptions(0)) }) - err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testServerPod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, testServerPod.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to wait for pod %s to be running", testServerPod.Name) // Retrieve server pod IP. - testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), testServerPod.Name, metav1.GetOptions{}) + testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, testServerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod %v", testServerPod.Name) testServerIP := testServerPod.Status.PodIP framework.Logf("testServerIP is %s", testServerIP) @@ -500,14 +500,14 @@ var _ = common.SIGDescribe("DNS", func() { }, }, } - testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod, metav1.CreateOptions{}) + testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, testUtilsPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %s", testUtilsPod.Name) framework.Logf("Created pod %v", testUtilsPod) ginkgo.DeferCleanup(func(ctx context.Context) error { framework.Logf("Deleting pod %s...", testUtilsPod.Name) - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0)) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, testUtilsPod.Name, *metav1.NewDeleteOptions(0)) }) - err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testUtilsPod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, testUtilsPod.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to wait for pod %s to be running", testUtilsPod.Name) ginkgo.By("Verifying customized DNS option is configured on pod...") @@ -560,7 +560,7 @@ var _ = common.SIGDescribe("DNS", func() { ginkgo.It("should work with the pod containing more than 6 DNS search paths and longer than 256 search list characters", func(ctx context.Context) { ginkgo.By("Getting the kube-dns IP") - svc, err := f.ClientSet.CoreV1().Services("kube-system").Get(context.TODO(), "kube-dns", metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services("kube-system").Get(ctx, "kube-dns", metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get kube-dns service") kubednsIP := svc.Spec.ClusterIP @@ -599,7 +599,7 @@ var _ = common.SIGDescribe("DNS", func() { }, }, } - validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) }) }) @@ -615,12 +615,12 @@ var _ = common.SIGDescribe("DNS HostNetwork", func() { "dns-test": "true", } headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) + _, err := 
f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, headlessService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) regularServiceName := "test-service-2" regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) - regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{}) + regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, regularService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) // All the names we need to be able to resolve. @@ -645,7 +645,7 @@ var _ = common.SIGDescribe("DNS HostNetwork", func() { pod.ObjectMeta.Labels = testServiceSelector pod.Spec.HostNetwork = true pod.Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet - validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) + validateDNSResults(ctx, f, pod, append(wheezyFileNames, jessieFileNames...)) }) }) diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 03c596233f5..7af7ca69476 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -68,13 +68,13 @@ func newDNSTestCommon() dnsTestCommon { } } -func (t *dnsTestCommon) init() { +func (t *dnsTestCommon) init(ctx context.Context) { ginkgo.By("Finding a DNS pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) options := metav1.ListOptions{LabelSelector: label.String()} namespace := "kube-system" - pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(context.TODO(), options) + pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(ctx, options) framework.ExpectNoError(err, "failed to list pods in namespace: %s", namespace) gomega.Expect(len(pods.Items)).Should(gomega.BeNumerically(">=", 1)) @@ -144,7 +144,7 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string { return newLineRegexp.Split(stdout, -1) } -func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { +func (t *dnsTestCommon) setConfigMap(ctx context.Context, cm *v1.ConfigMap) { if t.cm != nil { t.cm = cm } @@ -158,39 +158,39 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { "metadata.name": t.name, }.AsSelector().String(), } - cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(context.TODO(), options) + cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(ctx, options) framework.ExpectNoError(err, "failed to list ConfigMaps in namespace: %s", t.ns) if len(cmList.Items) == 0 { ginkgo.By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) - _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(ctx, cm, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm) } else { ginkgo.By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) - _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(context.TODO(), cm, metav1.UpdateOptions{}) + _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(ctx, cm, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm) } } -func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string { +func (t *dnsTestCommon) fetchDNSConfigMapData(ctx context.Context) map[string]string { if t.name == "coredns" { - pcm, err := 
t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), t.name, metav1.GetOptions{}) + pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, t.name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get DNS ConfigMap: %s", t.name) return pcm.Data } return nil } -func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) { +func (t *dnsTestCommon) restoreDNSConfigMap(ctx context.Context, configMapData map[string]string) { if t.name == "coredns" { - t.setConfigMap(&v1.ConfigMap{Data: configMapData}) - t.deleteCoreDNSPods() + t.setConfigMap(ctx, &v1.ConfigMap{Data: configMapData}) + t.deleteCoreDNSPods(ctx) } else { - t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, metav1.DeleteOptions{}) + framework.ExpectNoError(t.c.CoreV1().ConfigMaps(t.ns).Delete(ctx, t.name, metav1.DeleteOptions{})) } } -func (t *dnsTestCommon) createUtilPodLabel(baseName string) { +func (t *dnsTestCommon) createUtilPodLabel(ctx context.Context, baseName string) { // Actual port # doesn't matter, just needs to exist. const servicePort = 10101 podName := fmt.Sprintf("%s-%s", baseName, string(uuid.NewUUID())) @@ -198,10 +198,10 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { t.utilPod = e2epod.NewAgnhostPod(t.f.Namespace.Name, podName, nil, nil, ports) var err error - t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.utilPod, metav1.CreateOptions{}) + t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(ctx, t.utilPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %v", t.utilPod) framework.Logf("Created pod %v", t.utilPod) - err = e2epod.WaitForPodNameRunningInNamespace(t.f.ClientSet, t.utilPod.Name, t.f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, t.f.ClientSet, t.utilPod.Name, t.f.Namespace.Name) framework.ExpectNoError(err, "pod failed to start running: %v", t.utilPod) t.utilService = &v1.Service{ @@ -224,31 +224,31 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { }, } - t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(context.TODO(), t.utilService, metav1.CreateOptions{}) + t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(ctx, t.utilService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name) framework.Logf("Created service %v", t.utilService) } -func (t *dnsTestCommon) deleteUtilPod() { +func (t *dnsTestCommon) deleteUtilPod(ctx context.Context) { podClient := t.c.CoreV1().Pods(t.f.Namespace.Name) - if err := podClient.Delete(context.TODO(), t.utilPod.Name, *metav1.NewDeleteOptions(0)); err != nil { + if err := podClient.Delete(ctx, t.utilPod.Name, *metav1.NewDeleteOptions(0)); err != nil { framework.Logf("Delete of pod %v/%v failed: %v", t.utilPod.Namespace, t.utilPod.Name, err) } } // deleteCoreDNSPods manually deletes the CoreDNS pods to apply the changes to the ConfigMap. 
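The dns_common.go hunks apply one mechanical rule to the dnsTestCommon helpers: every method that talks to the apiserver gains a leading ctx parameter and forwards it instead of calling context.TODO(). A simplified sketch of that shape, with a cut-down helper type and a Get-then-Create/Update flow standing in for the real List-based setConfigMap.

package dnscommonsketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// helper is a cut-down stand-in for dnsTestCommon in this sketch.
type helper struct {
	c  kubernetes.Interface
	ns string
}

// setConfigMap creates or updates the ConfigMap, always with the caller's ctx.
func (h *helper) setConfigMap(ctx context.Context, cm *v1.ConfigMap) error {
	_, err := h.c.CoreV1().ConfigMaps(h.ns).Get(ctx, cm.Name, metav1.GetOptions{})
	switch {
	case err == nil:
		_, err = h.c.CoreV1().ConfigMaps(h.ns).Update(ctx, cm, metav1.UpdateOptions{})
		return err
	case apierrors.IsNotFound(err):
		_, err = h.c.CoreV1().ConfigMaps(h.ns).Create(ctx, cm, metav1.CreateOptions{})
		return err
	default:
		return err
	}
}
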
-func (t *dnsTestCommon) deleteCoreDNSPods() { +func (t *dnsTestCommon) deleteCoreDNSPods(ctx context.Context) { label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(context.TODO(), options) + pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(ctx, options) framework.ExpectNoError(err, "failed to list pods of kube-system with label %q", label.String()) podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem) for _, pod := range pods.Items { - err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "failed to delete pod: %s", pod.Name) } } @@ -304,31 +304,31 @@ func generateCoreDNSConfigmap(namespaceName string, aRecords map[string]string) } } -func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) { +func (t *dnsTestCommon) createDNSPodFromObj(ctx context.Context, pod *v1.Pod) { t.dnsServerPod = pod var err error - t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.dnsServerPod, metav1.CreateOptions{}) + t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(ctx, t.dnsServerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod: %v", t.dnsServerPod) framework.Logf("Created pod %v", t.dnsServerPod) - err = e2epod.WaitForPodNameRunningInNamespace(t.f.ClientSet, t.dnsServerPod.Name, t.f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, t.f.ClientSet, t.dnsServerPod.Name, t.f.Namespace.Name) framework.ExpectNoError(err, "pod failed to start running: %v", t.dnsServerPod) - t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(context.TODO(), t.dnsServerPod.Name, metav1.GetOptions{}) + t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(ctx, t.dnsServerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod: %s", t.dnsServerPod.Name) } -func (t *dnsTestCommon) createDNSServer(namespace string, aRecords map[string]string) { +func (t *dnsTestCommon) createDNSServer(ctx context.Context, namespace string, aRecords map[string]string) { corednsConfig := generateCoreDNSConfigmap(namespace, aRecords) - corednsConfig, err := t.c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), corednsConfig, metav1.CreateOptions{}) + corednsConfig, err := t.c.CoreV1().ConfigMaps(namespace).Create(ctx, corednsConfig, metav1.CreateOptions{}) if err != nil { framework.Failf("unable to create test configMap %s: %v", corednsConfig.Name, err) } - t.createDNSPodFromObj(generateCoreDNSServerPod(corednsConfig)) + t.createDNSPodFromObj(ctx, generateCoreDNSServerPod(corednsConfig)) } -func (t *dnsTestCommon) createDNSServerWithPtrRecord(namespace string, isIPv6 bool) { +func (t *dnsTestCommon) createDNSServerWithPtrRecord(ctx context.Context, namespace string, isIPv6 bool) { // NOTE: PTR records are generated automatically by CoreDNS. So, if we're creating A records, we're // going to also have PTR records. 
See: https://coredns.io/plugins/hosts/ var aRecords map[string]string @@ -337,12 +337,12 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord(namespace string, isIPv6 bo } else { aRecords = map[string]string{"my.test": "192.0.2.123"} } - t.createDNSServer(namespace, aRecords) + t.createDNSServer(ctx, namespace, aRecords) } -func (t *dnsTestCommon) deleteDNSServerPod() { +func (t *dnsTestCommon) deleteDNSServerPod(ctx context.Context) { podClient := t.c.CoreV1().Pods(t.f.Namespace.Name) - if err := podClient.Delete(context.TODO(), t.dnsServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil { + if err := podClient.Delete(ctx, t.dnsServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil { framework.Logf("Delete of pod %v/%v failed: %v", t.utilPod.Namespace, t.dnsServerPod.Name, err) } @@ -445,17 +445,17 @@ func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePre return probeCmd, fileName } -func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { - assertFilesContain(fileNames, fileDir, pod, client, false, "") +func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) { + assertFilesContain(ctx, fileNames, fileDir, pod, client, false, "") } -func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) { +func assertFilesContain(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) { var failed []string - framework.ExpectNoError(wait.PollImmediate(time.Second*5, time.Second*600, func() (bool, error) { + framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) { failed = []string{} - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() for _, fileName := range fileNames { @@ -488,54 +488,54 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client framework.ExpectEqual(len(failed), 0) } -func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) { +func validateDNSResults(ctx context.Context, f *framework.Framework, pod *v1.Pod, fileNames []string) { ginkgo.By("submitting the pod to kubernetes") podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.By("deleting the pod") return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) }) - if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { + if _, err := podClient.Create(ctx, pod, metav1.CreateOptions{}); err != nil { framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) ginkgo.By("retrieving the pod") - pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Try to find results for each expected name. 
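assertFilesContain above shows a second layer: inside the context-aware poll, each iteration derives a short-lived child context with context.WithTimeout, so one hung call can neither stall the whole poll nor outlive the test. A minimal sketch of that layering; singleCallTimeout is an assumed constant standing in for framework.SingleCallTimeout.

package dnspollsketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// singleCallTimeout is an assumption of this sketch, not the framework's value.
const singleCallTimeout = 5 * time.Minute

// pollWithBoundedCalls runs check every 5s for up to 10 minutes; each attempt
// gets its own deadline derived from the (cancellable) poll context.
func pollWithBoundedCalls(ctx context.Context, check func(context.Context) (bool, error)) error {
	return wait.PollImmediateWithContext(ctx, 5*time.Second, 10*time.Minute,
		func(ctx context.Context) (bool, error) {
			callCtx, cancel := context.WithTimeout(ctx, singleCallTimeout)
			defer cancel()
			return check(callCtx)
		})
}
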
ginkgo.By("looking for the results for each expected name from probers") - assertFilesExist(fileNames, "results", pod, f.ClientSet) + assertFilesExist(ctx, fileNames, "results", pod, f.ClientSet) // TODO: probe from the host, too. framework.Logf("DNS probes using %s/%s succeeded\n", pod.Namespace, pod.Name) } -func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) { +func validateTargetedProbeOutput(ctx context.Context, f *framework.Framework, pod *v1.Pod, fileNames []string, value string) { ginkgo.By("submitting the pod to kubernetes") podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.By("deleting the pod") - return podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) }) - if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { + if _, err := podClient.Create(ctx, pod, metav1.CreateOptions{}); err != nil { framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) ginkgo.By("retrieving the pod") - pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Try to find the expected value for each expected name. ginkgo.By("looking for the results for each expected name from probers") - assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value) + assertFilesContain(ctx, fileNames, "results", pod, f.ClientSet, true, value) framework.Logf("DNS probes using %s succeeded\n", pod.Name) } diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index dfdaf374958..f44de0d77e6 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -41,22 +41,22 @@ type dnsNameserverTest struct { dnsTestCommon } -func (t *dnsNameserverTest) run(isIPv6 bool) { - t.init() +func (t *dnsNameserverTest) run(ctx context.Context, isIPv6 bool) { + t.init(ctx) - t.createUtilPodLabel("e2e-dns-configmap") + t.createUtilPodLabel(ctx, "e2e-dns-configmap") ginkgo.DeferCleanup(t.deleteUtilPod) - originalConfigMapData := t.fetchDNSConfigMapData() + originalConfigMapData := t.fetchDNSConfigMapData(ctx) ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData) if isIPv6 { - t.createDNSServer(t.f.Namespace.Name, map[string]string{ + t.createDNSServer(ctx, t.f.Namespace.Name, map[string]string{ "abc.acme.local": "2606:4700:4700::1111", "def.acme.local": "2606:4700:4700::2222", "widget.local": "2606:4700:4700::3333", }) } else { - t.createDNSServer(t.f.Namespace.Name, map[string]string{ + t.createDNSServer(ctx, t.f.Namespace.Name, map[string]string{ "abc.acme.local": "1.1.1.1", "def.acme.local": "2.2.2.2", "widget.local": "3.3.3.3", @@ -65,7 +65,7 @@ func (t *dnsNameserverTest) run(isIPv6 bool) { ginkgo.DeferCleanup(t.deleteDNSServerPod) if t.name == "coredns" { - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + t.setConfigMap(ctx, &v1.ConfigMap{Data: map[string]string{ "Corefile": fmt.Sprintf(`.:53 { health ready @@ -81,9 +81,9 @@ func (t *dnsNameserverTest) run(isIPv6 bool) { }`, 
framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP, t.dnsServerPod.Status.PodIP), }}) - t.deleteCoreDNSPods() + t.deleteCoreDNSPods(ctx) } else { - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + t.setConfigMap(ctx, &v1.ConfigMap{Data: map[string]string{ "stubDomains": fmt.Sprintf(`{"acme.local":["%v"]}`, t.dnsServerPod.Status.PodIP), "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), }}) @@ -123,7 +123,7 @@ func (t *dnsNameserverTest) run(isIPv6 bool) { moreForeverTestTimeout) } - t.restoreDNSConfigMap(originalConfigMapData) + t.restoreDNSConfigMap(ctx, originalConfigMapData) // Wait for the deleted ConfigMap to take effect, otherwise the // configuration can bleed into other tests. t.checkDNSRecordFrom( @@ -137,15 +137,15 @@ type dnsPtrFwdTest struct { dnsTestCommon } -func (t *dnsPtrFwdTest) run(isIPv6 bool) { - t.init() +func (t *dnsPtrFwdTest) run(ctx context.Context, isIPv6 bool) { + t.init(ctx) - t.createUtilPodLabel("e2e-dns-configmap") + t.createUtilPodLabel(ctx, "e2e-dns-configmap") ginkgo.DeferCleanup(t.deleteUtilPod) - originalConfigMapData := t.fetchDNSConfigMapData() + originalConfigMapData := t.fetchDNSConfigMapData(ctx) ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData) - t.createDNSServerWithPtrRecord(t.f.Namespace.Name, isIPv6) + t.createDNSServerWithPtrRecord(ctx, t.f.Namespace.Name, isIPv6) ginkgo.DeferCleanup(t.deleteDNSServerPod) // Should still be able to lookup public nameserver without explicit upstream nameserver set. @@ -164,7 +164,7 @@ func (t *dnsPtrFwdTest) run(isIPv6 bool) { } if t.name == "coredns" { - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + t.setConfigMap(ctx, &v1.ConfigMap{Data: map[string]string{ "Corefile": fmt.Sprintf(`.:53 { health ready @@ -177,9 +177,9 @@ func (t *dnsPtrFwdTest) run(isIPv6 bool) { }`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP), }}) - t.deleteCoreDNSPods() + t.deleteCoreDNSPods(ctx) } else { - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + t.setConfigMap(ctx, &v1.ConfigMap{Data: map[string]string{ "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), }}) } @@ -191,7 +191,7 @@ func (t *dnsPtrFwdTest) run(isIPv6 bool) { "ptr-record", moreForeverTestTimeout) - t.restoreDNSConfigMap(originalConfigMapData) + t.restoreDNSConfigMap(ctx, originalConfigMapData) t.checkDNSRecordFrom( "2001:db8::29", func(actual []string) bool { return len(actual) == 0 }, @@ -205,7 +205,7 @@ func (t *dnsPtrFwdTest) run(isIPv6 bool) { "ptr-record", moreForeverTestTimeout) - t.restoreDNSConfigMap(originalConfigMapData) + t.restoreDNSConfigMap(ctx, originalConfigMapData) t.checkDNSRecordFrom( "192.0.2.123", func(actual []string) bool { return len(actual) == 0 }, @@ -218,21 +218,21 @@ type dnsExternalNameTest struct { dnsTestCommon } -func (t *dnsExternalNameTest) run(isIPv6 bool) { - t.init() +func (t *dnsExternalNameTest) run(ctx context.Context, isIPv6 bool) { + t.init(ctx) - t.createUtilPodLabel("e2e-dns-configmap") + t.createUtilPodLabel(ctx, "e2e-dns-configmap") ginkgo.DeferCleanup(t.deleteUtilPod) - originalConfigMapData := t.fetchDNSConfigMapData() + originalConfigMapData := t.fetchDNSConfigMapData(ctx) ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData) fooHostname := "foo.example.com" if isIPv6 { - t.createDNSServer(t.f.Namespace.Name, map[string]string{ + t.createDNSServer(ctx, t.f.Namespace.Name, map[string]string{ fooHostname: "2001:db8::29", }) } else { - t.createDNSServer(t.f.Namespace.Name, 
map[string]string{ + t.createDNSServer(ctx, t.f.Namespace.Name, map[string]string{ fooHostname: "192.0.2.123", }) } @@ -241,13 +241,13 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { f := t.f serviceName := "dns-externalname-upstream-test" externalNameService := e2eservice.CreateServiceSpec(serviceName, googleDNSHostname, false, nil) - if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{}); err != nil { + if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, externalNameService, metav1.CreateOptions{}); err != nil { ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } ginkgo.DeferCleanup(f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete, externalNameService.Name, metav1.DeleteOptions{}) serviceNameLocal := "dns-externalname-upstream-local" externalNameServiceLocal := e2eservice.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil) - if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameServiceLocal, metav1.CreateOptions{}); err != nil { + if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, externalNameServiceLocal, metav1.CreateOptions{}); err != nil { ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } ginkgo.DeferCleanup(f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete, externalNameServiceLocal.Name, metav1.DeleteOptions{}) @@ -271,7 +271,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { } if t.name == "coredns" { - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + t.setConfigMap(ctx, &v1.ConfigMap{Data: map[string]string{ "Corefile": fmt.Sprintf(`.:53 { health ready @@ -284,9 +284,9 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { }`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP), }}) - t.deleteCoreDNSPods() + t.deleteCoreDNSPods(ctx) } else { - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + t.setConfigMap(ctx, &v1.ConfigMap{Data: map[string]string{ "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), }}) } @@ -308,7 +308,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { moreForeverTestTimeout) } - t.restoreDNSConfigMap(originalConfigMapData) + t.restoreDNSConfigMap(ctx, originalConfigMapData) } var _ = common.SIGDescribe("DNS configMap nameserver", func() { @@ -318,7 +318,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func(ctx context.Context) { nsTest.c = nsTest.f.ClientSet - nsTest.run(framework.TestContext.ClusterIsIPv6()) + nsTest.run(ctx, framework.TestContext.ClusterIsIPv6()) }) }) @@ -327,7 +327,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func(ctx context.Context) { fwdTest.c = fwdTest.f.ClientSet - fwdTest.run(framework.TestContext.ClusterIsIPv6()) + fwdTest.run(ctx, framework.TestContext.ClusterIsIPv6()) }) }) @@ -336,7 +336,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func(ctx context.Context) { externalNameTest.c = externalNameTest.f.ClientSet - externalNameTest.run(framework.TestContext.ClusterIsIPv6()) + externalNameTest.run(ctx, framework.TestContext.ClusterIsIPv6()) }) }) }) diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go 
index 56bc766e346..33b46da7868 100644 --- a/test/e2e/network/dns_scale_records.go +++ b/test/e2e/network/dns_scale_records.go @@ -46,11 +46,11 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { f := framework.NewDefaultFramework("performancedns") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)) - e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute) + ginkgo.BeforeEach(func(ctx context.Context) { + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, framework.TestContext.NodeSchedulableTimeout)) + e2enode.WaitForTotalHealthy(ctx, f.ClientSet, time.Minute) - err := framework.CheckTestingNSDeletedExcept(f.ClientSet, f.Namespace.Name) + err := framework.CheckTestingNSDeletedExcept(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) }) @@ -61,7 +61,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { var namespaces []string for i := 0; i < numNs; i++ { - ns, _ := f.CreateNamespace(f.BaseName, nil) + ns, _ := f.CreateNamespace(ctx, f.BaseName, nil) namespaces = append(namespaces, ns.Name) f.AddNamespacesToDelete(ns) } @@ -72,13 +72,13 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i])) } framework.Logf("Creating %v test services", maxServicesPerCluster) - workqueue.ParallelizeUntil(context.TODO(), parallelCreateServiceWorkers, len(services), createService) + workqueue.ParallelizeUntil(ctx, parallelCreateServiceWorkers, len(services), createService) dnsTest := dnsTestCommon{ f: f, c: f.ClientSet, ns: f.Namespace.Name, } - dnsTest.createUtilPodLabel("e2e-dns-scale-records") + dnsTest.createUtilPodLabel(ctx, "e2e-dns-scale-records") ginkgo.DeferCleanup(dnsTest.deleteUtilPod) framework.Logf("Querying %v%% of service records", checkServicePercent*100) for i := 0; i < len(services); i++ { @@ -86,7 +86,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { continue } s := services[i] - svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(context.TODO(), s.Name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(ctx, s.Name, metav1.GetOptions{}) framework.ExpectNoError(err) qname := fmt.Sprintf("%v.%v.svc.%v", s.Name, s.Namespace, framework.TestContext.ClusterDNSDomain) framework.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP) diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go index 5d473549396..0b77c53e745 100644 --- a/test/e2e/network/dual_stack.go +++ b/test/e2e/network/dual_stack.go @@ -58,7 +58,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { ginkgo.It("should have ipv4 and ipv6 internal node ip", func(ctx context.Context) { // TODO (aramase) can switch to new function to get all nodes - nodeList, err := e2enode.GetReadySchedulableNodes(cs) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) for _, node := range nodeList.Items { @@ -92,7 +92,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } ginkgo.By("submitting the pod to kubernetes") - p := podClient.CreateSync(pod) + p := podClient.CreateSync(ctx, pod) gomega.Expect(p.Status.PodIP).ShouldNot(gomega.BeEquivalentTo("")) gomega.Expect(p.Status.PodIPs).ShouldNot(gomega.BeNil()) @@ -107,7 +107,7 @@ 
var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } ginkgo.By("deleting the pod") - err := podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30)) + err := podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(30)) framework.ExpectNoError(err, "failed to delete pod") }) @@ -120,7 +120,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // this is to ensure connectivity from all nodes on cluster // FIXME: tests may be run in large clusters. This test is O(n^2) in the // number of nodes used. It should use GetBoundedReadySchedulableNodes(). - nodeList, err := e2enode.GetReadySchedulableNodes(cs) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) replicas := int32(len(nodeList.Items)) @@ -181,10 +181,10 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { }, } - serverDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), serverDeploymentSpec, metav1.CreateOptions{}) + serverDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(ctx, serverDeploymentSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) - clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), clientDeploymentSpec, metav1.CreateOptions{}) + clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(ctx, clientDeploymentSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) err = e2edeployment.WaitForDeploymentComplete(cs, serverDeployment) @@ -192,13 +192,13 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { err = e2edeployment.WaitForDeploymentComplete(cs, clientDeployment) framework.ExpectNoError(err) - serverPods, err := e2edeployment.GetPodsForDeployment(cs, serverDeployment) + serverPods, err := e2edeployment.GetPodsForDeployment(ctx, cs, serverDeployment) framework.ExpectNoError(err) - clientPods, err := e2edeployment.GetPodsForDeployment(cs, clientDeployment) + clientPods, err := e2edeployment.GetPodsForDeployment(ctx, cs, clientDeployment) framework.ExpectNoError(err) - assertNetworkConnectivity(f, *serverPods, *clientPods, "dualstack-test-client", "80") + assertNetworkConnectivity(ctx, f, *serverPods, *clientPods, "dualstack-test-client", "80") }) ginkgo.It("should create a single stack service with cluster ip from primary service range", func(ctx context.Context) { @@ -218,7 +218,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { service := createService(t.ServiceName, t.Namespace, t.Labels, nil, nil) jig.Labels = t.Labels - err := jig.CreateServicePods(2) + err := jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) svc, err := t.CreateService(service) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) @@ -236,7 +236,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // ensure endpoint belong to same ipfamily as service if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { - endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -270,7 +270,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { service := createService(t.ServiceName, t.Namespace, t.Labels, nil, expectedFamilies) jig.Labels = t.Labels - err := jig.CreateServicePods(2) + err := jig.CreateServicePods(ctx, 2) 
framework.ExpectNoError(err) svc, err := t.CreateService(service) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) @@ -282,7 +282,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // ensure endpoints belong to same ipfamily as service if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { - endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -315,7 +315,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { service := createService(t.ServiceName, t.Namespace, t.Labels, nil, expectedFamilies) jig.Labels = t.Labels - err := jig.CreateServicePods(2) + err := jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) svc, err := t.CreateService(service) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) @@ -327,7 +327,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // ensure endpoints belong to same ipfamily as service if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { - endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -360,7 +360,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { service := createService(t.ServiceName, t.Namespace, t.Labels, &expectedPolicy, expectedFamilies) jig.Labels = t.Labels - err := jig.CreateServicePods(2) + err := jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) svc, err := t.CreateService(service) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) @@ -372,7 +372,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // ensure endpoints belong to same ipfamily as service if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { - endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -405,7 +405,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { service := createService(t.ServiceName, t.Namespace, t.Labels, &expectedPolicy, expectedFamilies) jig.Labels = t.Labels - err := jig.CreateServicePods(2) + err := jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) svc, err := t.CreateService(service) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) @@ -417,7 +417,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // ensure endpoints belong to same ipfamily as service if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { - endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -435,134 +435,134 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { ginkgo.Describe("Granular Checks: Services Secondary IP Family [LinuxOnly]", func() { ginkgo.It("should function for pod-Service: http", 
func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.SecondaryNodeIP, config.NodeHTTPPort)) - err = config.DialFromTestContainer("http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for pod-Service: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) - err := config.DialFromTestContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.SecondaryNodeIP, config.NodeUDPPort)) - err = config.DialFromTestContainer("udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.EnableSCTP) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort)) - err := config.DialFromTestContainer("sctp", config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "sctp", config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.SecondaryNodeIP, config.NodeSCTPPort)) - err = config.DialFromTestContainer("sctp", config.SecondaryNodeIP, config.NodeSCTPPort, config.MaxTries, 0, 
config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "sctp", config.SecondaryNodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for node-Service: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromNode("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromNode(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.SecondaryNodeIP, config.SecondaryNodeIP, config.NodeHTTPPort)) - err = config.DialFromNode("http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for node-Service: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) - err := config.DialFromNode("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromNode(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.SecondaryNodeIP, config.SecondaryNodeIP, config.NodeUDPPort)) - err = config.DialFromNode("udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for endpoint-Service: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromEndpointContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromEndpointContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { 
framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.SecondaryNodeIP, config.NodeHTTPPort)) - err = config.DialFromEndpointContainer("http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for endpoint-Service: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) - err := config.DialFromEndpointContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromEndpointContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.SecondaryNodeIP, config.NodeUDPPort)) - err = config.DialFromEndpointContainer("udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should update endpoints: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } - config.DeleteNetProxyPod() + config.DeleteNetProxyPod(ctx) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) - err = config.DialFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should update endpoints: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", 
config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) - err := config.DialFromTestContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } - config.DeleteNetProxyPod() + config.DeleteNetProxyPod(ctx) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) - err = config.DialFromTestContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -570,11 +570,11 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // [LinuxOnly]: Windows does not support session affinity. ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterHTTPPort)) // Check if number of endpoints returned are exactly one. - eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterHTTPPort, e2enetwork.SessionAffinityChecks) + eps, err := config.GetEndpointsFromTestContainer(ctx, "http", config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterHTTPPort, e2enetwork.SessionAffinityChecks) if err != nil { framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) } @@ -588,11 +588,11 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // [LinuxOnly]: Windows does not support session affinity. ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterUDPPort)) // Check if number of endpoints returned are exactly one. 
- eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterUDPPort, e2enetwork.SessionAffinityChecks) + eps, err := config.GetEndpointsFromTestContainer(ctx, "udp", config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterUDPPort, e2enetwork.SessionAffinityChecks) if err != nil { framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) } @@ -605,34 +605,34 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { }) ginkgo.It("should be able to handle large requests: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) message := strings.Repeat("42", 1000) - config.DialEchoFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) + config.DialEchoFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) }) ginkgo.It("should be able to handle large requests: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) message := "n" + strings.Repeat("o", 1999) - config.DialEchoFromTestContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message) + config.DialEchoFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message) }) // if the endpoints pods use hostNetwork, several tests can't run in parallel // because the pods will try to acquire the same port in the host. // We run the test in serial, to avoid port conflicts. 
ginkgo.It("should function for service endpoints using hostNetwork", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork, e2enetwork.EndpointsUseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork, e2enetwork.EndpointsUseHostNetwork) ginkgo.By("pod-Service(hostNetwork): http") ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.SecondaryNodeIP, config.NodeHTTPPort)) - err = config.DialFromTestContainer("http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -640,13 +640,13 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { ginkgo.By("node-Service(hostNetwork): http") ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) - err = config.DialFromNode("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.SecondaryNodeIP, config.SecondaryNodeIP, config.NodeHTTPPort)) - err = config.DialFromNode("http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -655,14 +655,14 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) - err = config.DialFromNode("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { time.Sleep(10 * time.Hour) framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.SecondaryNodeIP, config.SecondaryNodeIP, config.NodeUDPPort)) - err = config.DialFromNode("udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, 
config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -671,7 +671,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.SecondaryClusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) message := strings.Repeat("42", 1000) - err = config.DialEchoFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) + err = config.DialEchoFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -680,7 +680,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.SecondaryClusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) message = "n" + strings.Repeat("o", 1999) - err = config.DialEchoFromTestContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message) + err = config.DialEchoFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -741,7 +741,7 @@ func validateEndpointsBelongToIPFamily(svc *v1.Service, endpoint *v1.Endpoints, } } -func assertNetworkConnectivity(f *framework.Framework, serverPods v1.PodList, clientPods v1.PodList, containerName, port string) { +func assertNetworkConnectivity(ctx context.Context, f *framework.Framework, serverPods v1.PodList, clientPods v1.PodList, containerName, port string) { // curl from each client pod to all server pods to assert connectivity duration := "10s" pollInterval := "1s" @@ -760,7 +760,7 @@ func assertNetworkConnectivity(f *framework.Framework, serverPods v1.PodList, cl for _, clientPod := range clientPods.Items { for _, ip := range serverIPs { - gomega.Consistently(func() error { + gomega.Consistently(ctx, func() error { ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %s", clientPod.Name, ip, port)) cmd := checkNetworkConnectivity(ip, port, timeout) _, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, clientPod.Name, containerName, cmd...) 
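The dual-stack hunks above all apply the same mechanical change: the spec body accepts the context supplied by Ginkgo, and every framework helper and client-go call receives that context instead of context.TODO(). A condensed sketch of the resulting shape, separate from the patch itself (the "example" suite name and the single Service lookup are illustrative; the helper signatures and import paths are the ones used in the files above):

    // Illustrative sketch only, not part of the patch.
    package example

    import (
        "context"

        "github.com/onsi/ginkgo/v2"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/test/e2e/framework"
        e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
    )

    var _ = ginkgo.Describe("example", func() {
        f := framework.NewDefaultFramework("example")

        // The spec body takes a context.Context; Ginkgo cancels it when the
        // spec is interrupted or times out.
        ginkgo.It("should reach the API service", func(ctx context.Context) {
            // Framework helpers now take ctx as their first argument ...
            config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
            framework.Logf("endpoint hostnames under test: %v", config.EndpointHostnames())

            // ... and client-go calls use ctx instead of context.TODO(), so the
            // request is cancelled as soon as the test is aborted.
            _, err := f.ClientSet.CoreV1().Services("default").Get(ctx, "kubernetes", metav1.GetOptions{})
            framework.ExpectNoError(err)
        })
    })
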
diff --git a/test/e2e/network/endpointslice.go b/test/e2e/network/endpointslice.go index 03f350c8094..ff04d19495f 100644 --- a/test/e2e/network/endpointslice.go +++ b/test/e2e/network/endpointslice.go @@ -67,17 +67,17 @@ var _ = common.SIGDescribe("EndpointSlice", func() { namespace := "default" name := "kubernetes" // verify "kubernetes.default" service exist - _, err := cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + _, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "error obtaining API server \"kubernetes\" Service resource on \"default\" namespace") // verify Endpoints for the API servers exist - endpoints, err := cs.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + endpoints, err := cs.CoreV1().Endpoints(namespace).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "error obtaining API server \"kubernetes\" Endpoint resource on \"default\" namespace") if len(endpoints.Subsets) == 0 { framework.Failf("Expected at least 1 subset in endpoints, got %d: %#v", len(endpoints.Subsets), endpoints.Subsets) } // verify EndpointSlices for the API servers exist - endpointSliceList, err := cs.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{ + endpointSliceList, err := cs.DiscoveryV1().EndpointSlices(namespace).List(ctx, metav1.ListOptions{ LabelSelector: "kubernetes.io/service-name=" + name, }) framework.ExpectNoError(err, "error obtaining API server \"kubernetes\" EndpointSlice resource on \"default\" namespace") @@ -100,7 +100,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { The endpointslice controller should create and delete EndpointSlices for Pods matching a Service. */ framework.ConformanceIt("should create and delete Endpoints and EndpointSlices for a Service with a selector specified", func(ctx context.Context) { - svc := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ + svc := createServiceReportErr(ctx, cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "example-empty-selector", }, @@ -118,7 +118,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { // Expect Endpoints resource to be created. if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) { - _, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + _, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -130,7 +130,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { // Expect EndpointSlice resource to be created. 
var endpointSlice discoveryv1.EndpointSlice if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) { - endpointSliceList, err := cs.DiscoveryV1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{ + endpointSliceList, err := cs.DiscoveryV1().EndpointSlices(svc.Namespace).List(ctx, metav1.ListOptions{ LabelSelector: "kubernetes.io/service-name=" + svc.Name, }) if err != nil { @@ -157,12 +157,12 @@ var _ = common.SIGDescribe("EndpointSlice", func() { framework.Failf("Expected EndpointSlice to have 0 endpoints, got %d: %#v", len(endpointSlice.Endpoints), endpointSlice.Endpoints) } - err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) + err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "error deleting Service") // Expect Endpoints resource to be deleted when Service is. if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) { - _, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + _, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return true, nil @@ -179,7 +179,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { // and may need to retry informer resync at some point during an e2e // run. if err := wait.PollImmediate(2*time.Second, 90*time.Second, func() (bool, error) { - endpointSliceList, err := cs.DiscoveryV1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{ + endpointSliceList, err := cs.DiscoveryV1().EndpointSlices(svc.Namespace).List(ctx, metav1.ListOptions{ LabelSelector: "kubernetes.io/service-name=" + svc.Name, }) if err != nil { @@ -209,7 +209,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { labelShared12 := "shared12" labelValue := "on" - pod1 := podClient.Create(&v1.Pod{ + pod1 := podClient.Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Labels: map[string]string{ @@ -231,7 +231,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { }, }) - pod2 := podClient.Create(&v1.Pod{ + pod2 := podClient.Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Labels: map[string]string{ @@ -256,7 +256,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { }, }) - svc1 := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ + svc1 := createServiceReportErr(ctx, cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "example-int-port", }, @@ -272,7 +272,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { }, }) - svc2 := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ + svc2 := createServiceReportErr(ctx, cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "example-named-port", }, @@ -288,7 +288,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { }, }) - svc3 := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ + svc3 := createServiceReportErr(ctx, cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "example-no-match", }, @@ -306,7 +306,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { err := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) { var err error - pod1, err = podClient.Get(context.TODO(), pod1.Name, metav1.GetOptions{}) + pod1, err = podClient.Get(ctx, pod1.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -314,7 +314,7 @@ var _ = 
common.SIGDescribe("EndpointSlice", func() { return false, nil } - pod2, err = podClient.Get(context.TODO(), pod2.Name, metav1.GetOptions{}) + pod2, err = podClient.Get(ctx, pod2.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -327,19 +327,19 @@ var _ = common.SIGDescribe("EndpointSlice", func() { framework.ExpectNoError(err, "timed out waiting for Pods to have IPs assigned") ginkgo.By("referencing a single matching pod") - expectEndpointsAndSlices(cs, f.Namespace.Name, svc1, []*v1.Pod{pod1}, 1, 1, false) + expectEndpointsAndSlices(ctx, cs, f.Namespace.Name, svc1, []*v1.Pod{pod1}, 1, 1, false) ginkgo.By("referencing matching pods with named port") - expectEndpointsAndSlices(cs, f.Namespace.Name, svc2, []*v1.Pod{pod1, pod2}, 2, 2, true) + expectEndpointsAndSlices(ctx, cs, f.Namespace.Name, svc2, []*v1.Pod{pod1, pod2}, 2, 2, true) ginkgo.By("creating empty Endpoints and EndpointSlices for no matching Pods") - expectEndpointsAndSlices(cs, f.Namespace.Name, svc3, []*v1.Pod{}, 0, 1, false) + expectEndpointsAndSlices(ctx, cs, f.Namespace.Name, svc3, []*v1.Pod{}, 0, 1, false) // TODO: Update test to cover Endpoints recreation after deletes once it // actually works. ginkgo.By("recreating EndpointSlices after they've been deleted") - deleteEndpointSlices(cs, f.Namespace.Name, svc2) - expectEndpointsAndSlices(cs, f.Namespace.Name, svc2, []*v1.Pod{pod1, pod2}, 2, 2, true) + deleteEndpointSlices(ctx, cs, f.Namespace.Name, svc2) + expectEndpointsAndSlices(ctx, cs, f.Namespace.Name, svc2, []*v1.Pod{pod1, pod2}, 2, 2, true) }) /* @@ -391,7 +391,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { ginkgo.By("getting /apis/discovery.k8s.io") { group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/discovery.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/discovery.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err) found := false for _, version := range group.Versions { @@ -423,54 +423,54 @@ var _ = common.SIGDescribe("EndpointSlice", func() { // EndpointSlice resource create/read/update/watch verbs ginkgo.By("creating") - _, err := epsClient.Create(context.TODO(), epsTemplate, metav1.CreateOptions{}) + _, err := epsClient.Create(ctx, epsTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = epsClient.Create(context.TODO(), epsTemplate, metav1.CreateOptions{}) + _, err = epsClient.Create(ctx, epsTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) - createdEPS, err := epsClient.Create(context.TODO(), epsTemplate, metav1.CreateOptions{}) + createdEPS, err := epsClient.Create(ctx, epsTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("getting") - queriedEPS, err := epsClient.Get(context.TODO(), createdEPS.Name, metav1.GetOptions{}) + queriedEPS, err := epsClient.Get(ctx, createdEPS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(queriedEPS.UID, createdEPS.UID) ginkgo.By("listing") - epsList, err := epsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + epsList, err := epsClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(epsList.Items), 3, "filtered list should have 3 items") ginkgo.By("watching") framework.Logf("starting watch") - epsWatch, err := epsClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: epsList.ResourceVersion, LabelSelector: 
"special-label=" + f.UniqueName}) + epsWatch, err := epsClient.Watch(ctx, metav1.ListOptions{ResourceVersion: epsList.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) // Test cluster-wide list and watch clusterEPSClient := f.ClientSet.DiscoveryV1().EndpointSlices("") ginkgo.By("cluster-wide listing") - clusterEPSList, err := clusterEPSClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + clusterEPSList, err := clusterEPSClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(clusterEPSList.Items), 3, "filtered list should have 3 items") ginkgo.By("cluster-wide watching") framework.Logf("starting watch") - _, err = clusterEPSClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: epsList.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) + _, err = clusterEPSClient.Watch(ctx, metav1.ListOptions{ResourceVersion: epsList.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) ginkgo.By("patching") - patchedEPS, err := epsClient.Patch(context.TODO(), createdEPS.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) + patchedEPS, err := epsClient.Patch(ctx, createdEPS.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedEPS.Annotations["patched"], "true", "patched object should have the applied annotation") ginkgo.By("updating") var epsToUpdate, updatedEPS *discoveryv1.EndpointSlice err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - epsToUpdate, err = epsClient.Get(context.TODO(), createdEPS.Name, metav1.GetOptions{}) + epsToUpdate, err = epsClient.Get(ctx, createdEPS.Name, metav1.GetOptions{}) if err != nil { return err } epsToUpdate.Annotations["updated"] = "true" - updatedEPS, err = epsClient.Update(context.TODO(), epsToUpdate, metav1.UpdateOptions{}) + updatedEPS, err = epsClient.Update(ctx, epsToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err) @@ -502,13 +502,13 @@ var _ = common.SIGDescribe("EndpointSlice", func() { ginkgo.By("deleting") - err = epsClient.Delete(context.TODO(), createdEPS.Name, metav1.DeleteOptions{}) + err = epsClient.Delete(ctx, createdEPS.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - _, err = epsClient.Get(context.TODO(), createdEPS.Name, metav1.GetOptions{}) + _, err = epsClient.Get(ctx, createdEPS.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.Failf("expected 404, got %v", err) } - epsList, err = epsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + epsList, err = epsClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(epsList.Items), 2, "filtered list should have 2 items") for _, eps := range epsList.Items { @@ -518,9 +518,9 @@ var _ = common.SIGDescribe("EndpointSlice", func() { } ginkgo.By("deleting a collection") - err = epsClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + err = epsClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) - epsList, err = 
epsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + epsList, err = epsClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(epsList.Items), 0, "filtered list should have 0 items") }) @@ -532,10 +532,10 @@ var _ = common.SIGDescribe("EndpointSlice", func() { // necessarily consistent. It is used as a helper function for the tests above // and takes some shortcuts with the assumption that those test cases will be // the only caller of this function. -func expectEndpointsAndSlices(cs clientset.Interface, ns string, svc *v1.Service, pods []*v1.Pod, numSubsets, numSlices int, namedPort bool) { +func expectEndpointsAndSlices(ctx context.Context, cs clientset.Interface, ns string, svc *v1.Service, pods []*v1.Pod, numSubsets, numSlices int, namedPort bool) { endpointSlices := []discoveryv1.EndpointSlice{} - if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { - endpointSlicesFound, hasMatchingSlices := hasMatchingEndpointSlices(cs, ns, svc.Name, len(pods), numSlices) + if err := wait.PollImmediateWithContext(ctx, 5*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) { + endpointSlicesFound, hasMatchingSlices := hasMatchingEndpointSlices(ctx, cs, ns, svc.Name, len(pods), numSlices) if !hasMatchingSlices { return false, nil } @@ -546,8 +546,8 @@ func expectEndpointsAndSlices(cs clientset.Interface, ns string, svc *v1.Service } endpoints := &v1.Endpoints{} - if err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { - endpointsFound, hasMatchingEndpoints := hasMatchingEndpoints(cs, ns, svc.Name, len(pods), numSubsets) + if err := wait.PollWithContext(ctx, 5*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) { + endpointsFound, hasMatchingEndpoints := hasMatchingEndpoints(ctx, cs, ns, svc.Name, len(pods), numSubsets) if !hasMatchingEndpoints { framework.Logf("Matching Endpoints not found") return false, nil @@ -698,13 +698,13 @@ func expectEndpointsAndSlices(cs clientset.Interface, ns string, svc *v1.Service } // deleteEndpointSlices deletes EndpointSlices for the specified Service. -func deleteEndpointSlices(cs clientset.Interface, ns string, svc *v1.Service) { +func deleteEndpointSlices(ctx context.Context, cs clientset.Interface, ns string, svc *v1.Service) { listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svc.Name)} - esList, err := cs.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), listOptions) + esList, err := cs.DiscoveryV1().EndpointSlices(ns).List(ctx, listOptions) framework.ExpectNoError(err, "Error fetching EndpointSlices for %s/%s Service", ns, svc.Name) for _, endpointSlice := range esList.Items { - err := cs.DiscoveryV1().EndpointSlices(ns).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{}) + err := cs.DiscoveryV1().EndpointSlices(ns).Delete(ctx, endpointSlice.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Error deleting %s/%s EndpointSlice", ns, endpointSlice.Name) } } @@ -712,9 +712,9 @@ func deleteEndpointSlices(cs clientset.Interface, ns string, svc *v1.Service) { // hasMatchingEndpointSlices returns any EndpointSlices that match the // conditions along with a boolean indicating if all the conditions have been // met. 
-func hasMatchingEndpointSlices(cs clientset.Interface, ns, svcName string, numEndpoints, numSlices int) ([]discoveryv1.EndpointSlice, bool) { +func hasMatchingEndpointSlices(ctx context.Context, cs clientset.Interface, ns, svcName string, numEndpoints, numSlices int) ([]discoveryv1.EndpointSlice, bool) { listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svcName)} - esList, err := cs.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), listOptions) + esList, err := cs.DiscoveryV1().EndpointSlices(ns).List(ctx, listOptions) framework.ExpectNoError(err, "Error fetching EndpointSlice for Service %s/%s", ns, svcName) if len(esList.Items) == 0 { @@ -756,8 +756,8 @@ func hasMatchingEndpointSlices(cs clientset.Interface, ns, svcName string, numEn // hasMatchingEndpoints returns any Endpoints that match the conditions along // with a boolean indicating if all the conditions have been met. -func hasMatchingEndpoints(cs clientset.Interface, ns, svcName string, numIPs, numSubsets int) (*v1.Endpoints, bool) { - endpoints, err := cs.CoreV1().Endpoints(ns).Get(context.TODO(), svcName, metav1.GetOptions{}) +func hasMatchingEndpoints(ctx context.Context, cs clientset.Interface, ns, svcName string, numIPs, numSubsets int) (*v1.Endpoints, bool) { + endpoints, err := cs.CoreV1().Endpoints(ns).Get(ctx, svcName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("Endpoints for %s/%s Service not found", ns, svcName) @@ -802,8 +802,8 @@ func ensurePodTargetRef(pod *v1.Pod, targetRef *v1.ObjectReference) { } // createServiceReportErr creates a Service and reports any associated error. -func createServiceReportErr(cs clientset.Interface, ns string, service *v1.Service) *v1.Service { - svc, err := cs.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{}) +func createServiceReportErr(ctx context.Context, cs clientset.Interface, ns string, service *v1.Service) *v1.Service { + svc, err := cs.CoreV1().Services(ns).Create(ctx, service, metav1.CreateOptions{}) framework.ExpectNoError(err, "error deleting Service") return svc } diff --git a/test/e2e/network/endpointslicemirroring.go b/test/e2e/network/endpointslicemirroring.go index ddfbc8f345c..2341a7240b5 100644 --- a/test/e2e/network/endpointslicemirroring.go +++ b/test/e2e/network/endpointslicemirroring.go @@ -53,7 +53,7 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { The endpointslices mirrorowing must mirror endpoint create, update, and delete actions. 
*/ framework.ConformanceIt("should mirror a custom Endpoints resource through create update and delete", func(ctx context.Context) { - svc := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ + svc := createServiceReportErr(ctx, cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "example-custom-endpoints", }, @@ -81,11 +81,11 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { } ginkgo.By("mirroring a new custom Endpoint", func() { - _, err := cs.CoreV1().Endpoints(f.Namespace.Name).Create(context.TODO(), endpoints, metav1.CreateOptions{}) + _, err := cs.CoreV1().Endpoints(f.Namespace.Name).Create(ctx, endpoints, metav1.CreateOptions{}) framework.ExpectNoError(err, "Unexpected error creating Endpoints") if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) { - esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: discoveryv1.LabelServiceName + "=" + svc.Name, }) if err != nil { @@ -132,12 +132,12 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { endpoints.Subsets[0].Addresses = []v1.EndpointAddress{{ IP: "10.2.3.4", }} - _, err := cs.CoreV1().Endpoints(f.Namespace.Name).Update(context.TODO(), endpoints, metav1.UpdateOptions{}) + _, err := cs.CoreV1().Endpoints(f.Namespace.Name).Update(ctx, endpoints, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Unexpected error updating Endpoints") // Expect mirrored EndpointSlice resource to be updated. if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) { - esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: discoveryv1.LabelServiceName + "=" + svc.Name, }) if err != nil { @@ -179,12 +179,12 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { }) ginkgo.By("mirroring deletion of a custom Endpoint", func() { - err := cs.CoreV1().Endpoints(f.Namespace.Name).Delete(context.TODO(), endpoints.Name, metav1.DeleteOptions{}) + err := cs.CoreV1().Endpoints(f.Namespace.Name).Delete(ctx, endpoints.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Unexpected error deleting Endpoints") // Expect mirrored EndpointSlice resource to be updated. 
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) { - esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(ctx, metav1.ListOptions{ LabelSelector: discoveryv1.LabelServiceName + "=" + svc.Name, }) if err != nil { @@ -204,7 +204,7 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { ginkgo.It("should mirror a custom Endpoint with multiple subsets and same IP address", func(ctx context.Context) { ns := f.Namespace.Name - svc := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ + svc := createServiceReportErr(ctx, cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "example-custom-endpoints", }, @@ -244,7 +244,7 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { e2epod.NewAgnhostContainer("container-handle-9090-request", nil, port9090, "netexec", "--http-port", "9090", "--udp-port", "-1"), ) - pod := e2epod.NewPodClient(f).CreateSync(serverPod) + pod := e2epod.NewPodClient(f).CreateSync(ctx, serverPod) if pod.Status.PodIP == "" { framework.Failf("PodIP not assigned for pod %s", pod.Name) @@ -302,7 +302,7 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { // connect to the service must work ginkgo.By("Creating a pause pods that will try to connect to the webservers") pausePod := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) - e2epod.NewPodClient(f).CreateSync(pausePod) + e2epod.NewPodClient(f).CreateSync(ctx, pausePod) dest1 := net.JoinHostPort(svc.Spec.ClusterIP, "80") dest2 := net.JoinHostPort(svc.Spec.ClusterIP, "81") execHostnameTest(*pausePod, dest1, pod.Name) diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index 14c73017582..12f24ae80a5 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -92,7 +92,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { for i := range namespaces { var err error namespaceName := fmt.Sprintf("dnsexample%d", i) - namespaces[i], err = f.CreateNamespace(namespaceName, nil) + namespaces[i], err = f.CreateNamespace(ctx, namespaceName, nil) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) } @@ -106,21 +106,21 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { // wait for objects for _, ns := range namespaces { - e2eresource.WaitForControlledPodsRunning(c, ns.Name, backendName, api.Kind("ReplicationController")) - e2enetwork.WaitForService(c, ns.Name, backendName, true, framework.Poll, framework.ServiceStartTimeout) + e2eresource.WaitForControlledPodsRunning(ctx, c, ns.Name, backendName, api.Kind("ReplicationController")) + framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns.Name, backendName, true, framework.Poll, framework.ServiceStartTimeout)) } // it is not enough that pods are running because they may be set to running, but // the application itself may have not been initialized. Just query the application. 
for _, ns := range namespaces { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns.Name).List(context.TODO(), options) + pods, err := c.CoreV1().Pods(ns.Name).List(ctx, options) framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name) - err = e2epod.PodsResponding(c, ns.Name, backendName, false, pods) + err = e2epod.PodsResponding(ctx, c, ns.Name, backendName, false, pods) framework.ExpectNoError(err, "waiting for all pods to respond") framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name) - err = waitForServiceResponding(c, ns.Name, backendName) + err = waitForServiceResponding(ctx, c, ns.Name, backendName) framework.ExpectNoError(err, "waiting for the service to respond") } @@ -134,7 +134,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { // This code is probably unnecessary, but let's stay on the safe side. label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(namespaces[0].Name).List(context.TODO(), options) + pods, err := c.CoreV1().Pods(namespaces[0].Name).List(ctx, options) if err != nil || pods == nil || len(pods.Items) == 0 { framework.Failf("no running pods found") @@ -155,7 +155,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { // wait until the pods have been scheduler, i.e. are not Pending anymore. Remember // that we cannot wait for the pods to be running because our pods terminate by themselves. for _, ns := range namespaces { - err := e2epod.WaitForPodNotPending(c, ns.Name, frontendName) + err := e2epod.WaitForPodNotPending(ctx, c, ns.Name, frontendName) framework.ExpectNoError(err) } @@ -168,17 +168,17 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { }) // waitForServiceResponding waits for the service to be responding. -func waitForServiceResponding(c clientset.Interface, ns, name string) error { +func waitForServiceResponding(ctx context.Context, c clientset.Interface, ns, name string) error { ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) - return wait.PollImmediate(framework.Poll, RespondingTimeout, func() (done bool, err error) { + return wait.PollImmediateWithContext(ctx, framework.Poll, RespondingTimeout, func(ctx context.Context) (done bool, err error) { proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) if errProxy != nil { framework.Logf("Failed to get services proxy request: %v:", errProxy) return false, nil } - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() body, err := proxyRequest.Namespace(ns). 
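The waitForServiceResponding hunk above also shows the second recurring pattern: pollers move to the ...WithContext variants and per-attempt timeouts are derived from the test context rather than from context.Background(), so aborting the spec cancels both the poll loop and any in-flight request. A minimal sketch under the same assumptions (doOneCall stands in for the proxied request; the interval and timeouts are illustrative):

    // Illustrative sketch only, not part of the patch.
    package example

    import (
        "context"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // waitForResponse retries doOneCall until it succeeds, the overall timeout
    // expires, or ctx (the Ginkgo spec context) is cancelled.
    func waitForResponse(ctx context.Context, doOneCall func(context.Context) error) error {
        return wait.PollImmediateWithContext(ctx, time.Second, 2*time.Minute,
            func(ctx context.Context) (bool, error) {
                // Bound each attempt, but inherit cancellation from the test:
                // aborting the spec cancels callCtx immediately.
                callCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
                defer cancel()
                if err := doOneCall(callCtx); err != nil {
                    return false, nil // not ready yet; keep polling
                }
                return true, nil
            })
    }
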
diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index a4b05823de0..34ae4cb097d 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -78,12 +78,12 @@ var _ = common.SIGDescribe("Firewall rule", func() { serviceName := "firewall-test-loadbalancer" ginkgo.By("Getting cluster ID") - clusterID, err := gce.GetClusterID(cs) + clusterID, err := gce.GetClusterID(ctx, cs) framework.ExpectNoError(err) framework.Logf("Got cluster ID: %v", clusterID) jig := e2eservice.NewTestJig(cs, ns, serviceName) - nodeList, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests) + nodeList, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, e2eservice.MaxNodesForEndpointsTests) framework.ExpectNoError(err) nodesNames := []string{} @@ -93,22 +93,22 @@ var _ = common.SIGDescribe("Firewall rule", func() { nodesSet := sets.NewString(nodesNames...) ginkgo.By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global") - svc, err := jig.CreateLoadBalancerService(e2eservice.GetServiceLoadBalancerCreationTimeout(cs), func(svc *v1.Service) { + svc, err := jig.CreateLoadBalancerService(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs), func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}} svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges }) framework.ExpectNoError(err) defer func() { - _, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.LoadBalancerSourceRanges = nil }) framework.ExpectNoError(err) - err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted") localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) - _, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, e2eservice.LoadBalancerCleanupTimeout) + _, err := gce.WaitForFirewallRule(ctx, gceCloud, localHCFwName, false, e2eservice.LoadBalancerCleanupTimeout) framework.ExpectNoError(err) }() svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP @@ -129,18 +129,18 @@ var _ = common.SIGDescribe("Firewall rule", func() { // OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE ginkgo.By("Updating LoadBalancer service to ExternalTrafficPolicy=Local") - svc, err = jig.UpdateService(func(svc *v1.Service) { + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal }) framework.ExpectNoError(err) ginkgo.By("Waiting for the nodes health check firewall rule to be deleted") - _, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, e2eservice.LoadBalancerCleanupTimeout) + _, err = gce.WaitForFirewallRule(ctx, gceCloud, nodesHCFw.Name, false, e2eservice.LoadBalancerCleanupTimeout) framework.ExpectNoError(err) ginkgo.By("Waiting for the correct local traffic health check firewall rule to be created") localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false) - fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) + fw, err = 
gce.WaitForFirewallRule(ctx, gceCloud, localHCFw.Name, true, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)) framework.ExpectNoError(err) err = gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false) framework.ExpectNoError(err) @@ -158,21 +158,21 @@ var _ = common.SIGDescribe("Firewall rule", func() { nodeSelection := e2epod.NodeSelection{Name: nodeName} e2epod.SetNodeSelection(&pod.Spec, nodeSelection) pod.Spec.HostNetwork = true - _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name, framework.PodStartTimeout)) framework.Logf("Netexec pod %q in namespace %q running", podName, ns) defer func() { framework.Logf("Cleaning up the netexec pod: %v", podName) - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(ns).Delete(ctx, podName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }() } // Send requests from outside of the cluster because internal traffic is allowlisted ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached") - err = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet) + err = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs), nodesSet) framework.ExpectNoError(err) // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster @@ -195,12 +195,12 @@ var _ = common.SIGDescribe("Firewall rule", func() { nodesSet.Insert(nodesNames[0]) gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags) // Make sure traffic is recovered before exit - err = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet) + err = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs), nodesSet) framework.ExpectNoError(err) }() ginkgo.By("Accessing service through the external ip and examine got no response from the node without tags") - err = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet, 15) + err = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs), nodesSet, 15) framework.ExpectNoError(err) }) @@ -215,7 +215,7 @@ var _ = common.SIGDescribe("Firewall rule", func() { }) ginkgo.It("control plane should not expose well-known ports", func(ctx context.Context) { - nodes, err := e2enode.GetReadySchedulableNodes(cs) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) ginkgo.By("Checking well known ports on master and nodes are not exposed externally") @@ -226,7 +226,7 @@ var _ = common.SIGDescribe("Firewall rule", func() { assertNotReachableHTTPTimeout(nodeAddr, "/", ports.ProxyStatusPort, firewallTestTCPTimeout, false) } - controlPlaneAddresses := framework.GetControlPlaneAddresses(cs) + controlPlaneAddresses := framework.GetControlPlaneAddresses(ctx, cs) for _, instanceAddress := 
range controlPlaneAddresses { assertNotReachableHTTPTimeout(instanceAddress, "/healthz", ports.KubeControllerManagerPort, firewallTestTCPTimeout, true) assertNotReachableHTTPTimeout(instanceAddress, "/healthz", kubeschedulerconfig.DefaultKubeSchedulerPort, firewallTestTCPTimeout, true) diff --git a/test/e2e/network/funny_ips.go b/test/e2e/network/funny_ips.go index c60f9b0f896..8ed032edca2 100644 --- a/test/e2e/network/funny_ips.go +++ b/test/e2e/network/funny_ips.go @@ -96,13 +96,13 @@ var _ = common.SIGDescribe("CVE-2021-29923", func() { servicePort := 7180 jig := e2eservice.NewTestJig(cs, ns, serviceName) - clusterIPZero, clusterIPOctal := getServiceIPWithLeadingZeros(cs) + clusterIPZero, clusterIPOctal := getServiceIPWithLeadingZeros(ctx, cs) if clusterIPZero == "" { e2eskipper.Skipf("Couldn't find a free ClusterIP") } ginkgo.By("creating service " + serviceName + " with type=ClusterIP and ip " + clusterIPZero + " in namespace " + ns) - _, err := jig.CreateTCPService(func(svc *v1.Service) { + _, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.ClusterIP = clusterIPZero // IP with a leading zero svc.Spec.Type = v1.ServiceTypeClusterIP svc.Spec.Ports = []v1.ServicePort{ @@ -111,10 +111,10 @@ var _ = common.SIGDescribe("CVE-2021-29923", func() { }) framework.ExpectNoError(err) - err = jig.CreateServicePods(1) + err = jig.CreateServicePods(ctx, 1) framework.ExpectNoError(err) - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) ip := netutils.ParseIPSloppy(clusterIPZero) cmd := fmt.Sprintf("echo hostName | nc -v -t -w 2 %s %v", ip.String(), servicePort) err = wait.PollImmediate(1*time.Second, e2eservice.ServiceReachabilityShortPollTimeout, func() (bool, error) { @@ -160,11 +160,11 @@ var _ = common.SIGDescribe("CVE-2021-29923", func() { // Try to get a free IP that has different decimal and binary interpretation with leading zeros. // Return both IPs, the one interpretad as binary and the one interpreted as decimal. // Return empty if not IPs are found. -func getServiceIPWithLeadingZeros(cs clientset.Interface) (string, string) { +func getServiceIPWithLeadingZeros(ctx context.Context, cs clientset.Interface) (string, string) { clusterIPMap := map[string]struct{}{} var clusterIPPrefix string // Dump all the IPs and look for the ones we want. 
- list, err := cs.CoreV1().Services(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) + list, err := cs.CoreV1().Services(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range list.Items { if len(svc.Spec.ClusterIP) == 0 || svc.Spec.ClusterIP == v1.ClusterIPNone { diff --git a/test/e2e/network/hostport.go b/test/e2e/network/hostport.go index eb5f7272a2d..b8715910751 100644 --- a/test/e2e/network/hostport.go +++ b/test/e2e/network/hostport.go @@ -69,7 +69,7 @@ var _ = common.SIGDescribe("HostPort", func() { family = v1.IPv6Protocol } // Get a node where to schedule the pods - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 1) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 1) framework.ExpectNoError(err) if len(nodes.Items) == 0 { framework.Failf("No nodes available") @@ -86,13 +86,13 @@ var _ = common.SIGDescribe("HostPort", func() { // Create pods with the same HostPort ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP %s and expect scheduled", port, localhost)) - createHostPortPodOnNode(f, "pod1", ns, localhost, port, v1.ProtocolTCP, randomNode.Name) + createHostPortPodOnNode(ctx, f, "pod1", ns, localhost, port, v1.ProtocolTCP, randomNode.Name) ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP %s on the node which pod1 resides and expect scheduled", port, hostIP)) - createHostPortPodOnNode(f, "pod2", ns, hostIP, port, v1.ProtocolTCP, randomNode.Name) + createHostPortPodOnNode(ctx, f, "pod2", ns, hostIP, port, v1.ProtocolTCP, randomNode.Name) ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP %s but use UDP protocol on the node which pod2 resides", port, hostIP)) - createHostPortPodOnNode(f, "pod3", ns, hostIP, port, v1.ProtocolUDP, randomNode.Name) + createHostPortPodOnNode(ctx, f, "pod3", ns, hostIP, port, v1.ProtocolUDP, randomNode.Name) // check that the port is being actually exposed to each container // create a pod on the host network in the same node @@ -112,7 +112,7 @@ var _ = common.SIGDescribe("HostPort", func() { }, }, } - e2epod.NewPodClient(f).CreateSync(hostExecPod) + e2epod.NewPodClient(f).CreateSync(ctx, hostExecPod) // use a 5 seconds timeout per connection timeout := 5 @@ -164,7 +164,7 @@ var _ = common.SIGDescribe("HostPort", func() { // create pod which using hostport on the specified node according to the nodeSelector // it starts an http server on the exposed port -func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeName string) { +func createHostPortPodOnNode(ctx context.Context, f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeName string) { var netexecArgs []string var readinessProbePort int32 @@ -211,11 +211,11 @@ func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, NodeName: nodeName, }, } - if _, err := f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), hostPortPod, metav1.CreateOptions{}); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(ns).Create(ctx, hostPortPod, metav1.CreateOptions{}); err != nil { framework.Failf("error creating pod %s, err:%v", podName, err) } - if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, ns, framework.PodStartTimeout); err != nil { + if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podName, ns, framework.PodStartTimeout); err != nil { framework.Failf("wait for 
pod %s timeout, err:%v", podName, err) } } diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index a58a0eac930..ea71d2a129d 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -61,17 +61,17 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { f := framework.NewDefaultFramework("ingress") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { jig = e2eingress.NewIngressTestJig(f.ClientSet) ns = f.Namespace.Name // this test wants powerful permissions. Since the namespace names are unique, we can leave this // lying around so we don't have to race any caches - err := e2eauth.BindClusterRole(jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name, + err := e2eauth.BindClusterRole(ctx, jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) framework.ExpectNoError(err) - err = e2eauth.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(ctx, jig.Client.AuthorizationV1(), serviceaccount.MakeUsername(f.Namespace.Name, "default"), "", "create", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) @@ -88,7 +88,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { var gceController *gce.IngressController // Platform specific setup - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ @@ -96,12 +96,12 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { Client: jig.Client, Cloud: framework.TestContext.CloudConfig, } - err := gceController.Init() + err := gceController.Init(ctx) framework.ExpectNoError(err) }) // Platform specific cleanup - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() { e2eingress.DescribeIng(ns) } @@ -110,20 +110,20 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { return } ginkgo.By("Deleting ingress") - jig.TryDeleteIngress() + jig.TryDeleteIngress(ctx) ginkgo.By("Cleaning up cloud resources") - err := gceController.CleanupIngressController() + err := gceController.CleanupIngressController(ctx) framework.ExpectNoError(err) }) ginkgo.It("should conform to Ingress spec", func(ctx context.Context) { - conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) + conformanceTests = e2eingress.CreateIngressComformanceTests(ctx, jig, ns, map[string]string{}) for _, t := range conformanceTests { ginkgo.By(t.EntryLog) t.Execute() ginkgo.By(t.ExitLog) - jig.WaitForIngress(true) + jig.WaitForIngress(ctx, true) } }) @@ -133,7 +133,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { var gceController *gce.IngressController // Platform specific setup - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ @@ -141,12 +141,12 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { Client: jig.Client, Cloud: framework.TestContext.CloudConfig, } - err := gceController.Init() + err := gceController.Init(ctx) framework.ExpectNoError(err) }) // Platform specific cleanup - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if 
ginkgo.CurrentSpecReport().Failed() { e2eingress.DescribeIng(ns) } @@ -155,80 +155,80 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { return } ginkgo.By("Deleting ingress") - jig.TryDeleteIngress() + jig.TryDeleteIngress(ctx) ginkgo.By("Cleaning up cloud resources") - err := gceController.CleanupIngressController() + err := gceController.CleanupIngressController(ctx) framework.ExpectNoError(err) }) ginkgo.It("should conform to Ingress spec", func(ctx context.Context) { jig.PollInterval = 5 * time.Second - conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{ + conformanceTests = e2eingress.CreateIngressComformanceTests(ctx, jig, ns, map[string]string{ e2eingress.NEGAnnotation: `{"ingress": true}`, }) for _, t := range conformanceTests { ginkgo.By(t.EntryLog) t.Execute() ginkgo.By(t.ExitLog) - jig.WaitForIngress(true) - err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + jig.WaitForIngress(ctx, true) + err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false)) framework.ExpectNoError(err) } }) ginkgo.It("should be able to switch between IG and NEG modes", func(ctx context.Context) { var err error - propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(f.ClientSet) + propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, f.ClientSet) ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) - jig.WaitForIngress(true) - err = gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) + jig.WaitForIngress(ctx, true) + err = gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false)) framework.ExpectNoError(err) ginkgo.By("Switch backend service to use IG") - svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err := f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": false}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } - err = wait.Poll(5*time.Second, propagationTimeout, func() (bool, error) { - if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil { + err = wait.PollWithContext(ctx, 5*time.Second, propagationTimeout, func(ctx context.Context) (bool, error) { + if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(ctx, false)); err != nil { framework.Logf("ginkgo.Failed to verify IG backend service: %v", err) return false, nil } return true, nil }) framework.ExpectNoError(err, "Expect backend service to target IG, but failed to observe") - jig.WaitForIngress(true) + jig.WaitForIngress(ctx, true) ginkgo.By("Switch backend service to use NEG") - svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": true}` - _, err = 
f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } - err = wait.Poll(5*time.Second, propagationTimeout, func() (bool, error) { - if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil { + err = wait.PollWithContext(ctx, 5*time.Second, propagationTimeout, func(ctx context.Context) (bool, error) { + if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(ctx, false)); err != nil { framework.Logf("ginkgo.Failed to verify NEG backend service: %v", err) return false, nil } return true, nil }) framework.ExpectNoError(err, "Expect backend service to target NEG, but failed to observe") - jig.WaitForIngress(true) + jig.WaitForIngress(ctx, true) }) ginkgo.It("should be able to create a ClusterIP service", func(ctx context.Context) { ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) - jig.WaitForIngress(true) - svcPorts := jig.GetServicePorts(false) - err := gceController.WaitForNegBackendService(svcPorts) + jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) + jig.WaitForIngress(ctx, true) + svcPorts := jig.GetServicePorts(ctx, false) + err := gceController.WaitForNegBackendService(ctx, svcPorts) framework.ExpectNoError(err) // ClusterIP ServicePorts have no NodePort @@ -240,16 +240,16 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should sync endpoints to NEG", func(ctx context.Context) { name := "hostname" scaleAndValidateNEG := func(num int) { - scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(context.TODO(), name, metav1.GetOptions{}) + scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err) if scale.Spec.Replicas != int32(num) { scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(num) - _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale, metav1.UpdateOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{}) framework.ExpectNoError(err) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { - res, err := jig.GetDistinctResponseFromIngress() + res, err := jig.GetDistinctResponseFromIngress(ctx) if err != nil { return false, nil } @@ -260,10 +260,10 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { } ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) - jig.WaitForIngress(true) - jig.WaitForIngressToStable() - err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) + jig.WaitForIngress(ctx, true) + jig.WaitForIngressToStable(ctx) + err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false)) framework.ExpectNoError(err) // initial replicas number is 1 scaleAndValidateNEG(1) @@ -285,23 +285,23 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { name := "hostname" replicas := 8 ginkgo.By("Create a basic HTTP ingress using NEG") - 
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) - jig.WaitForIngress(true) - jig.WaitForIngressToStable() - err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) + jig.WaitForIngress(ctx, true) + jig.WaitForIngressToStable(ctx) + err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false)) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas)) - scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(context.TODO(), name, metav1.GetOptions{}) + scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err) scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(replicas) - _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale, metav1.UpdateOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{}) framework.ExpectNoError(err) - propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(f.ClientSet) + propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, f.ClientSet) err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) { - res, err := jig.GetDistinctResponseFromIngress() + res, err := jig.GetDistinctResponseFromIngress(ctx) if err != nil { return false, nil } @@ -310,19 +310,19 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { framework.ExpectNoError(err) ginkgo.By("Trigger rolling update and observe service disruption") - deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{}) + deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err) // trigger by changing graceful termination period to 60 seconds gracePeriod := int64(60) deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod - _, err = f.ClientSet.AppsV1().Deployments(ns).Update(context.TODO(), deploy, metav1.UpdateOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(ns).Update(ctx, deploy, metav1.UpdateOptions{}) framework.ExpectNoError(err) err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) { - res, err := jig.GetDistinctResponseFromIngress() + res, err := jig.GetDistinctResponseFromIngress(ctx) if err != nil { return false, err } - deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{}) + deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err } @@ -345,16 +345,16 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { expectedKeys := []int32{80, 443} scaleAndValidateExposedNEG := func(num int) { - scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(context.TODO(), name, metav1.GetOptions{}) + scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err) if scale.Spec.Replicas != int32(num) { scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(num) - _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale, metav1.UpdateOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, 
name, scale, metav1.UpdateOptions{}) framework.ExpectNoError(err) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err) var status e2eingress.NegStatus @@ -404,9 +404,9 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { } ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) - jig.WaitForIngress(true) - err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) + jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) + jig.WaitForIngress(ctx, true) + err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false)) framework.ExpectNoError(err) // initial replicas number is 1 scaleAndValidateExposedNEG(1) @@ -426,71 +426,71 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func(ctx context.Context) { ginkgo.By("Create a basic HTTP ingress using standalone NEG") - jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) - jig.WaitForIngress(true) + jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) + jig.WaitForIngress(ctx, true) name := "hostname" - detectNegAnnotation(f, jig, gceController, ns, name, 2) + detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2) // Add Ingress annotation - NEGs should stay the same. ginkgo.By("Adding NEG Ingress annotation") - svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err := f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } - detectNegAnnotation(f, jig, gceController, ns, name, 2) + detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2) // Modify exposed NEG annotation, but keep ingress annotation ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation") - svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } - detectNegAnnotation(f, jig, gceController, ns, name, 2) + detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2) // Remove Ingress annotation. 
Expect 1 NEG ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG") - svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } - detectNegAnnotation(f, jig, gceController, ns, name, 1) + detectNegAnnotation(ctx, f, jig, gceController, ns, name, 1) // Remove NEG annotation entirely. Expect 0 NEGs. ginkgo.By("Removing NEG annotation") - svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { delete(svc.Annotations, e2eingress.NEGAnnotation) // Service cannot be ClusterIP if it's using Instance Groups. svc.Spec.Type = v1.ServiceTypeNodePort - _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } - detectNegAnnotation(f, jig, gceController, ns, name, 0) + detectNegAnnotation(ctx, f, jig, gceController, ns, name, 0) }) }) }) -func detectNegAnnotation(f *framework.Framework, jig *e2eingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { +func detectNegAnnotation(ctx context.Context, f *framework.Framework, jig *e2eingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, nil } // if we expect no NEGs, then we should be using IGs if negs == 0 { - err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)) + err := gceController.BackendServiceUsingIG(jig.GetServicePorts(ctx, false)) if err != nil { framework.Logf("ginkgo.Failed to validate IG backend service: %v", err) return false, nil @@ -528,7 +528,7 @@ func detectNegAnnotation(f *framework.Framework, jig *e2eingress.TestJig, gceCon } } - err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)) + err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(ctx, false)) if err != nil { framework.Logf("ginkgo.Failed to validate NEG backend service: %v", err) return false, nil @@ -634,7 +634,7 @@ var _ = common.SIGDescribe("Ingress API", func() { ginkgo.By("getting /apis/networking.k8s.io") { group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err) found := false for _, version := range group.Versions { @@ -666,54 +666,54 @@ var _ = common.SIGDescribe("Ingress API", func() { // Ingress resource create/read/update/watch verbs ginkgo.By("creating") - _, err := ingClient.Create(context.TODO(), ingress1, metav1.CreateOptions{}) + _, 
err := ingClient.Create(ctx, ingress1, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = ingClient.Create(context.TODO(), ingress2, metav1.CreateOptions{}) + _, err = ingClient.Create(ctx, ingress2, metav1.CreateOptions{}) framework.ExpectNoError(err) - createdIngress, err := ingClient.Create(context.TODO(), ingress3, metav1.CreateOptions{}) + createdIngress, err := ingClient.Create(ctx, ingress3, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("getting") - gottenIngress, err := ingClient.Get(context.TODO(), createdIngress.Name, metav1.GetOptions{}) + gottenIngress, err := ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(gottenIngress.UID, createdIngress.UID) ginkgo.By("listing") - ings, err := ingClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + ings, err := ingClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(ings.Items), 3, "filtered list should have 3 items") ginkgo.By("watching") framework.Logf("starting watch") - ingWatch, err := ingClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: ings.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) + ingWatch, err := ingClient.Watch(ctx, metav1.ListOptions{ResourceVersion: ings.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) // Test cluster-wide list and watch clusterIngClient := f.ClientSet.NetworkingV1().Ingresses("") ginkgo.By("cluster-wide listing") - clusterIngs, err := clusterIngClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + clusterIngs, err := clusterIngClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(clusterIngs.Items), 3, "filtered list should have 3 items") ginkgo.By("cluster-wide watching") framework.Logf("starting watch") - _, err = clusterIngClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: ings.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) + _, err = clusterIngClient.Watch(ctx, metav1.ListOptions{ResourceVersion: ings.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) ginkgo.By("patching") - patchedIngress, err := ingClient.Patch(context.TODO(), createdIngress.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) + patchedIngress, err := ingClient.Patch(ctx, createdIngress.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedIngress.Annotations["patched"], "true", "patched object should have the applied annotation") ginkgo.By("updating") var ingToUpdate, updatedIngress *networkingv1.Ingress err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - ingToUpdate, err = ingClient.Get(context.TODO(), createdIngress.Name, metav1.GetOptions{}) + ingToUpdate, err = ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{}) if err != nil { return err } ingToUpdate.Annotations["updated"] = "true" - updatedIngress, err = ingClient.Update(context.TODO(), ingToUpdate, metav1.UpdateOptions{}) + updatedIngress, err = ingClient.Update(ctx, ingToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err) @@ -750,7 +750,7 @@ var _ = 
common.SIGDescribe("Ingress API", func() { } lbStatusJSON, err := json.Marshal(lbStatus) framework.ExpectNoError(err) - patchedStatus, err := ingClient.Patch(context.TODO(), createdIngress.Name, types.MergePatchType, + patchedStatus, err := ingClient.Patch(ctx, createdIngress.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"loadBalancer":`+string(lbStatusJSON)+`}}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err) @@ -760,14 +760,14 @@ var _ = common.SIGDescribe("Ingress API", func() { ginkgo.By("updating /status") var statusToUpdate, updatedStatus *networkingv1.Ingress err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - statusToUpdate, err = ingClient.Get(context.TODO(), createdIngress.Name, metav1.GetOptions{}) + statusToUpdate, err = ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{}) if err != nil { return err } statusToUpdate.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ Ingress: []networkingv1.IngressLoadBalancerIngress{{IP: "169.1.1.2"}}, } - updatedStatus, err = ingClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err = ingClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err) @@ -775,7 +775,7 @@ var _ = common.SIGDescribe("Ingress API", func() { ginkgo.By("get /status") ingResource := schema.GroupVersionResource{Group: "networking.k8s.io", Version: ingVersion, Resource: "ingresses"} - gottenStatus, err := f.DynamicClient.Resource(ingResource).Namespace(ns).Get(context.TODO(), createdIngress.Name, metav1.GetOptions{}, "status") + gottenStatus, err := f.DynamicClient.Resource(ingResource).Namespace(ns).Get(ctx, createdIngress.Name, metav1.GetOptions{}, "status") framework.ExpectNoError(err) statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid") framework.ExpectNoError(err) @@ -791,9 +791,9 @@ var _ = common.SIGDescribe("Ingress API", func() { } } - err = ingClient.Delete(context.TODO(), createdIngress.Name, metav1.DeleteOptions{}) + err = ingClient.Delete(ctx, createdIngress.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - ing, err := ingClient.Get(context.TODO(), createdIngress.Name, metav1.GetOptions{}) + ing, err := ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{}) // If ingress controller does not support finalizers, we expect a 404. Otherwise we validate finalizer behavior. 
if err == nil { expectFinalizer(ing, "deleting createdIngress") @@ -802,7 +802,7 @@ var _ = common.SIGDescribe("Ingress API", func() { framework.Failf("expected 404, got %v", err) } } - ings, err = ingClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + ings, err = ingClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) // Should have <= 3 items since some ingresses might not have been deleted yet due to finalizers if len(ings.Items) > 3 { @@ -816,9 +816,9 @@ var _ = common.SIGDescribe("Ingress API", func() { } ginkgo.By("deleting a collection") - err = ingClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + err = ingClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) - ings, err = ingClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + ings, err = ingClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) // Should have <= 3 items since some ingresses might not have been deleted yet due to finalizers if len(ings.Items) > 3 { diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go index da489f0818a..8d7377c8ff2 100644 --- a/test/e2e/network/ingress_scale.go +++ b/test/e2e/network/ingress_scale.go @@ -45,23 +45,23 @@ var _ = common.SIGDescribe("Loadbalancing: L7 Scalability", func() { scaleFramework *scale.IngressScaleFramework ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig) - if err := scaleFramework.PrepareScaleTest(); err != nil { + if err := scaleFramework.PrepareScaleTest(ctx); err != nil { framework.Failf("Unexpected error while preparing ingress scale test: %v", err) } }) - ginkgo.AfterEach(func() { - if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 { + ginkgo.AfterEach(func(ctx context.Context) { + if errs := scaleFramework.CleanupScaleTest(ctx); len(errs) != 0 { framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs) } }) ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func(ctx context.Context) { - if errs := scaleFramework.RunScaleTest(); len(errs) != 0 { + if errs := scaleFramework.RunScaleTest(ctx); len(errs) != 0 { framework.Failf("Unexpected error while running ingress scale test: %v", errs) } diff --git a/test/e2e/network/ingressclass.go b/test/e2e/network/ingressclass.go index 9fc289e7238..393116ae2a3 100644 --- a/test/e2e/network/ingressclass.go +++ b/test/e2e/network/ingressclass.go @@ -46,19 +46,19 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { }) ginkgo.It("should set default value on new IngressClass [Serial]", func(ctx context.Context) { - ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName) + ingressClass1, err := createIngressClass(ctx, cs, "ingressclass1", true, f.UniqueName) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() lastFailure := "" // the admission controller may 
take a few seconds to observe the ingress classes - if err := wait.Poll(time.Second, time.Minute, func() (bool, error) { + if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) { lastFailure = "" - ingress, err := createBasicIngress(cs, f.Namespace.Name) + ingress, err := createBasicIngress(ctx, cs, f.Namespace.Name) if err != nil { lastFailure = err.Error() return false, err @@ -83,19 +83,19 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { }) ginkgo.It("should not set default value if no default IngressClass [Serial]", func(ctx context.Context) { - ingressClass1, err := createIngressClass(cs, "ingressclass1", false, f.UniqueName) + ingressClass1, err := createIngressClass(ctx, cs, "ingressclass1", false, f.UniqueName) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() lastFailure := "" // the admission controller may take a few seconds to observe the ingress classes - if err := wait.Poll(time.Second, time.Minute, func() (bool, error) { + if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) { lastFailure = "" - ingress, err := createBasicIngress(cs, f.Namespace.Name) + ingress, err := createBasicIngress(ctx, cs, f.Namespace.Name) if err != nil { lastFailure = err.Error() return false, err @@ -117,11 +117,11 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { }) ginkgo.It("should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default[Serial]", func(ctx context.Context) { - ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName) + ingressClass1, err := createIngressClass(ctx, cs, "ingressclass1", true, f.UniqueName) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name) - ingressClass2, err := createIngressClass(cs, "ingressclass2", true, f.UniqueName) + ingressClass2, err := createIngressClass(ctx, cs, "ingressclass2", true, f.UniqueName) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass2.Name) @@ -130,7 +130,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { expectedName = ingressClass2.Name } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() // the admission controller may take a few seconds to observe both ingress classes @@ -148,7 +148,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { if cntDefault < 2 { return false, nil } - ingress, err := createBasicIngress(cs, f.Namespace.Name) + ingress, err := createBasicIngress(ctx, cs, f.Namespace.Name) if err != nil { return false, nil } @@ -184,7 +184,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { }, }, } - createdIngressClass, err := cs.NetworkingV1().IngressClasses().Create(context.TODO(), ingressClass, metav1.CreateOptions{}) + createdIngressClass, err := cs.NetworkingV1().IngressClasses().Create(ctx, ingressClass, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteIngressClass, cs, createdIngressClass.Name) @@ -203,7 +203,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { }) -func createIngressClass(cs clientset.Interface, name string, isDefault bool, uniqueName string) 
(*networkingv1.IngressClass, error) { +func createIngressClass(ctx context.Context, cs clientset.Interface, name string, isDefault bool, uniqueName string) (*networkingv1.IngressClass, error) { ingressClass := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -221,11 +221,11 @@ func createIngressClass(cs clientset.Interface, name string, isDefault bool, uni ingressClass.Annotations = map[string]string{networkingv1.AnnotationIsDefaultIngressClass: "true"} } - return cs.NetworkingV1().IngressClasses().Create(context.TODO(), ingressClass, metav1.CreateOptions{}) + return cs.NetworkingV1().IngressClasses().Create(ctx, ingressClass, metav1.CreateOptions{}) } -func createBasicIngress(cs clientset.Interface, namespace string) (*networkingv1.Ingress, error) { - return cs.NetworkingV1().Ingresses(namespace).Create(context.TODO(), &networkingv1.Ingress{ +func createBasicIngress(ctx context.Context, cs clientset.Interface, namespace string) (*networkingv1.Ingress, error) { + return cs.NetworkingV1().Ingresses(namespace).Create(ctx, &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "ingress1", }, @@ -242,8 +242,8 @@ func createBasicIngress(cs clientset.Interface, namespace string) (*networkingv1 }, metav1.CreateOptions{}) } -func deleteIngressClass(cs clientset.Interface, name string) { - err := cs.NetworkingV1().IngressClasses().Delete(context.TODO(), name, metav1.DeleteOptions{}) +func deleteIngressClass(ctx context.Context, cs clientset.Interface, name string) { + err := cs.NetworkingV1().IngressClasses().Delete(ctx, name, metav1.DeleteOptions{}) framework.ExpectNoError(err) } @@ -292,7 +292,7 @@ var _ = common.SIGDescribe("IngressClass API", func() { ginkgo.By("getting /apis/networking.k8s.io") { group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err) found := false for _, version := range group.Versions { @@ -324,38 +324,38 @@ var _ = common.SIGDescribe("IngressClass API", func() { // IngressClass resource create/read/update/watch verbs ginkgo.By("creating") - ingressClass1, err := createIngressClass(cs, "ingressclass1", false, f.UniqueName) + ingressClass1, err := createIngressClass(ctx, cs, "ingressclass1", false, f.UniqueName) framework.ExpectNoError(err) - _, err = createIngressClass(cs, "ingressclass2", false, f.UniqueName) + _, err = createIngressClass(ctx, cs, "ingressclass2", false, f.UniqueName) framework.ExpectNoError(err) - _, err = createIngressClass(cs, "ingressclass3", false, f.UniqueName) + _, err = createIngressClass(ctx, cs, "ingressclass3", false, f.UniqueName) framework.ExpectNoError(err) ginkgo.By("getting") - gottenIC, err := icClient.Get(context.TODO(), ingressClass1.Name, metav1.GetOptions{}) + gottenIC, err := icClient.Get(ctx, ingressClass1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(gottenIC.UID, ingressClass1.UID) framework.ExpectEqual(gottenIC.UID, ingressClass1.UID) ginkgo.By("listing") - ics, err := icClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=generic"}) + ics, err := icClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=generic"}) framework.ExpectNoError(err) framework.ExpectEqual(len(ics.Items), 3, "filtered list should have 3 items") ginkgo.By("watching") framework.Logf("starting watch") - icWatch, err := 
icClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: ics.ResourceVersion, LabelSelector: "ingressclass=" + f.UniqueName}) + icWatch, err := icClient.Watch(ctx, metav1.ListOptions{ResourceVersion: ics.ResourceVersion, LabelSelector: "ingressclass=" + f.UniqueName}) framework.ExpectNoError(err) ginkgo.By("patching") - patchedIC, err := icClient.Patch(context.TODO(), ingressClass1.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) + patchedIC, err := icClient.Patch(ctx, ingressClass1.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedIC.Annotations["patched"], "true", "patched object should have the applied annotation") ginkgo.By("updating") icToUpdate := patchedIC.DeepCopy() icToUpdate.Annotations["updated"] = "true" - updatedIC, err := icClient.Update(context.TODO(), icToUpdate, metav1.UpdateOptions{}) + updatedIC, err := icClient.Update(ctx, icToUpdate, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(updatedIC.Annotations["updated"], "true", "updated object should have the applied annotation") @@ -385,20 +385,20 @@ var _ = common.SIGDescribe("IngressClass API", func() { // IngressClass resource delete operations ginkgo.By("deleting") - err = icClient.Delete(context.TODO(), ingressClass1.Name, metav1.DeleteOptions{}) + err = icClient.Delete(ctx, ingressClass1.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - _, err = icClient.Get(context.TODO(), ingressClass1.Name, metav1.GetOptions{}) + _, err = icClient.Get(ctx, ingressClass1.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.Failf("expected 404, got %#v", err) } - ics, err = icClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName}) + ics, err = icClient.List(ctx, metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(ics.Items), 2, "filtered list should have 2 items") ginkgo.By("deleting a collection") - err = icClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName}) + err = icClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName}) framework.ExpectNoError(err) - ics, err = icClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName}) + ics, err = icClient.List(ctx, metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(ics.Items), 0, "filtered list should have 0 items") }) diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go index effedd3f76d..83541539b01 100644 --- a/test/e2e/network/kube_proxy.go +++ b/test/e2e/network/kube_proxy.go @@ -54,7 +54,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { fr.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should set TCP CLOSE_WAIT timeout [Privileged]", func(ctx context.Context) { - nodes, err := e2enode.GetBoundedReadySchedulableNodes(fr.ClientSet, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, fr.ClientSet, 2) framework.ExpectNoError(err) if len(nodes.Items) < 2 { e2eskipper.Skipf( @@ -118,7 +118,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { }, }, } - e2epod.NewPodClient(fr).CreateSync(hostExecPod) + 
e2epod.NewPodClient(fr).CreateSync(ctx, hostExecPod) // Create the client and server pods clientPodSpec := &v1.Pod{ @@ -186,10 +186,10 @@ var _ = common.SIGDescribe("KubeProxy", func() { serverNodeInfo.name, serverNodeInfo.nodeIP, kubeProxyE2eImage)) - e2epod.NewPodClient(fr).CreateSync(serverPodSpec) + e2epod.NewPodClient(fr).CreateSync(ctx, serverPodSpec) // The server should be listening before spawning the client pod - if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(fr.ClientSet, serverPodSpec.Name, fr.Namespace.Name, framework.PodStartTimeout); readyErr != nil { + if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, fr.ClientSet, serverPodSpec.Name, fr.Namespace.Name, framework.PodStartTimeout); readyErr != nil { framework.Failf("error waiting for server pod %s to be ready: %v", serverPodSpec.Name, readyErr) } // Connect to the server and leak the connection @@ -198,7 +198,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { clientNodeInfo.name, clientNodeInfo.nodeIP, kubeProxyE2eImage)) - e2epod.NewPodClient(fr).CreateSync(clientPodSpec) + e2epod.NewPodClient(fr).CreateSync(ctx, clientPodSpec) ginkgo.By("Checking conntrack entries for the timeout") // These must be synchronized from the default values set in diff --git a/test/e2e/network/loadbalancer.go b/test/e2e/network/loadbalancer.go index 61a92c208a5..14d5dcad1c6 100644 --- a/test/e2e/network/loadbalancer.go +++ b/test/e2e/network/loadbalancer.go @@ -78,8 +78,8 @@ func getInternalIP(node *v1.Node) (string, error) { // InternalIP adding a /16 or /64 mask depending on the IP family of the node. // IMPORTANT: These assumes a flat network assigned to the nodes, that is common // on cloud providers. -func getSubnetPrefix(c clientset.Interface) (*net.IPNet, error) { - node, err := getReadySchedulableWorkerNode(c) +func getSubnetPrefix(ctx context.Context, c clientset.Interface) (*net.IPNet, error) { + node, err := getReadySchedulableWorkerNode(ctx, c) if err != nil { return nil, fmt.Errorf("error getting a ready schedulable worker Node, err: %v", err) } @@ -103,8 +103,8 @@ func getSubnetPrefix(c clientset.Interface) (*net.IPNet, error) { // getReadySchedulableWorkerNode gets a single worker node which is available for // running pods on. If there are no such available nodes it will return an error. 
-func getReadySchedulableWorkerNode(c clientset.Interface) (*v1.Node, error) { - nodes, err := e2enode.GetReadySchedulableNodes(c) +func getReadySchedulableWorkerNode(ctx context.Context, c clientset.Interface) (*v1.Node, error) { + nodes, err := e2enode.GetReadySchedulableNodes(ctx, c) if err != nil { return nil, err } @@ -127,13 +127,13 @@ var _ = common.SIGDescribe("LoadBalancers", func() { var subnetPrefix *net.IPNet var err error - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = f.ClientSet - subnetPrefix, err = getSubnetPrefix(cs) + subnetPrefix, err = getSubnetPrefix(ctx, cs) framework.ExpectNoError(err) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() { DescribeSvc(f.Namespace.Name) } @@ -147,7 +147,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { if framework.ProviderIs("aws") { loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS } - loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) + loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) // This test is more monolithic than we'd like because LB turnup can be // very slow, so we lumped all the tests into one LB lifecycle. @@ -156,40 +156,40 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ns1 := f.Namespace.Name // LB1 in ns1 on TCP framework.Logf("namespace for TCP test: %s", ns1) - nodeIP, err := getRandomNodeIP(cs) + nodeIP, err := getRandomNodeIP(ctx, cs) framework.ExpectNoError(err, "Could not obtain a valid Node IP") ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName) - tcpService, err := tcpJig.CreateTCPService(nil) + tcpService, err := tcpJig.CreateTCPService(ctx, nil) framework.ExpectNoError(err) svcPort := int(tcpService.Spec.Ports[0].Port) framework.Logf("service port TCP: %d", svcPort) ginkgo.By("creating a pod to be part of the TCP service " + serviceName) - _, err = tcpJig.Run(nil) + _, err = tcpJig.Run(ctx, nil) framework.ExpectNoError(err) - execPod := e2epod.CreateExecPodOrFail(cs, ns1, "execpod", nil) - err = tcpJig.CheckServiceReachability(tcpService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns1, "execpod", nil) + err = tcpJig.CheckServiceReachability(ctx, tcpService, execPod) framework.ExpectNoError(err) // Change the services to NodePort. ginkgo.By("changing the TCP service to type=NodePort") - tcpService, err = tcpJig.UpdateService(func(s *v1.Service) { + tcpService, err = tcpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort }) framework.ExpectNoError(err) tcpNodePort := int(tcpService.Spec.Ports[0].NodePort) framework.Logf("TCP node port: %d", tcpNodePort) - err = tcpJig.CheckServiceReachability(tcpService, execPod) + err = tcpJig.CheckServiceReachability(ctx, tcpService, execPod) framework.ExpectNoError(err) ginkgo.By("hitting the TCP service's NodePort") - e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) + e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) // Change the services to LoadBalancer. 
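The loadbalancer.go hunks above convert ginkgo.BeforeEach/AfterEach callbacks to take a context.Context, the context Ginkgo cancels when a spec is interrupted or times out, and thread it into setup, cleanup and polling. A minimal sketch of that shape follows; waitForReady, the package name and the spec text are invented for illustration and are not framework code.

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForReady stands in for any API call or poll; it returns as soon as ctx is done.
func waitForReady(ctx context.Context) error {
	return wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
		return true, nil // real code would check cluster state here
	})
}

var _ = ginkgo.Describe("context-aware spec (sketch)", func() {
	ginkgo.BeforeEach(func(ctx context.Context) {
		// setup aborts promptly if the spec is interrupted
		_ = waitForReady(ctx)
	})

	ginkgo.It("threads ctx through calls and cleanup", func(ctx context.Context) {
		// DeferCleanup may take a func(ctx context.Context); Ginkgo supplies
		// a cleanup context when it runs the callback.
		ginkgo.DeferCleanup(func(ctx context.Context) {
			_ = waitForReady(ctx)
		})
		_ = waitForReady(ctx)
	})
})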
@@ -221,7 +221,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } ginkgo.By("changing the TCP service to type=LoadBalancer") - _, err = tcpJig.UpdateService(func(s *v1.Service) { + _, err = tcpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable s.Spec.Type = v1.ServiceTypeLoadBalancer }) @@ -229,7 +229,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ginkgo.By("waiting for the TCP service to have a load balancer") // Wait for the load balancer to be created asynchronously - tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout) + tcpService, err = tcpJig.WaitForLoadBalancer(ctx, loadBalancerCreateTimeout) framework.ExpectNoError(err) if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) @@ -258,19 +258,19 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } } - err = tcpJig.CheckServiceReachability(tcpService, execPod) + err = tcpJig.CheckServiceReachability(ctx, tcpService, execPod) framework.ExpectNoError(err) ginkgo.By("hitting the TCP service's NodePort") - e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) + e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") - e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) + e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout) // Change the services' node ports. ginkgo.By("changing the TCP service's NodePort") - tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort) + tcpService, err = tcpJig.ChangeServiceNodePort(ctx, tcpNodePort) framework.ExpectNoError(err) tcpNodePortOld := tcpNodePort tcpNodePort = int(tcpService.Spec.Ports[0].NodePort) @@ -283,18 +283,18 @@ var _ = common.SIGDescribe("LoadBalancers", func() { framework.Logf("TCP node port: %d", tcpNodePort) ginkgo.By("hitting the TCP service's new NodePort") - e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) + e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("checking the old TCP NodePort is closed") testNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") - e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) + e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout) // Change the services' main ports. 
ginkgo.By("changing the TCP service's port") - tcpService, err = tcpJig.UpdateService(func(s *v1.Service) { + tcpService, err = tcpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Ports[0].Port++ }) framework.ExpectNoError(err) @@ -313,13 +313,13 @@ var _ = common.SIGDescribe("LoadBalancers", func() { framework.Logf("service port TCP: %d", svcPort) ginkgo.By("hitting the TCP service's NodePort") - e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) + e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") - e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) + e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerCreateTimeout) ginkgo.By("Scaling the pods to 0") - err = tcpJig.Scale(0) + err = tcpJig.Scale(ctx, 0) framework.ExpectNoError(err) ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort") @@ -329,19 +329,19 @@ var _ = common.SIGDescribe("LoadBalancers", func() { testRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) ginkgo.By("Scaling the pods to 1") - err = tcpJig.Scale(1) + err = tcpJig.Scale(ctx, 1) framework.ExpectNoError(err) ginkgo.By("hitting the TCP service's NodePort") - e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) + e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") - e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) + e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerCreateTimeout) // Change the services back to ClusterIP. ginkgo.By("changing TCP service back to type=ClusterIP") - tcpReadback, err := tcpJig.UpdateService(func(s *v1.Service) { + tcpReadback, err := tcpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP }) framework.ExpectNoError(err) @@ -349,7 +349,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { framework.Fail("TCP Spec.Ports[0].NodePort was not cleared") } // Wait for the load balancer to be destroyed asynchronously - _, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout) + _, err = tcpJig.WaitForLoadBalancerDestroy(ctx, tcpIngressIP, svcPort, loadBalancerCreateTimeout) framework.ExpectNoError(err) ginkgo.By("checking the TCP NodePort is closed") @@ -364,7 +364,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { e2eskipper.SkipUnlessProviderIs("gce", "gke") loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault - loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) + loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) // This test is more monolithic than we'd like because LB turnup can be // very slow, so we lumped all the tests into one LB lifecycle. 
@@ -373,36 +373,36 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ns2 := f.Namespace.Name // LB1 in ns2 on TCP framework.Logf("namespace for TCP test: %s", ns2) - nodeIP, err := getRandomNodeIP(cs) + nodeIP, err := getRandomNodeIP(ctx, cs) framework.ExpectNoError(err, "Could not obtain a valid Node IP") ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2) udpJig := e2eservice.NewTestJig(cs, ns2, serviceName) - udpService, err := udpJig.CreateUDPService(nil) + udpService, err := udpJig.CreateUDPService(ctx, nil) framework.ExpectNoError(err) svcPort := int(udpService.Spec.Ports[0].Port) framework.Logf("service port UDP: %d", svcPort) ginkgo.By("creating a pod to be part of the UDP service " + serviceName) - _, err = udpJig.Run(nil) + _, err = udpJig.Run(ctx, nil) framework.ExpectNoError(err) - execPod := e2epod.CreateExecPodOrFail(cs, ns2, "execpod", nil) - err = udpJig.CheckServiceReachability(udpService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns2, "execpod", nil) + err = udpJig.CheckServiceReachability(ctx, udpService, execPod) framework.ExpectNoError(err) // Change the services to NodePort. ginkgo.By("changing the UDP service to type=NodePort") - udpService, err = udpJig.UpdateService(func(s *v1.Service) { + udpService, err = udpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort }) framework.ExpectNoError(err) udpNodePort := int(udpService.Spec.Ports[0].NodePort) framework.Logf("UDP node port: %d", udpNodePort) - err = udpJig.CheckServiceReachability(udpService, execPod) + err = udpJig.CheckServiceReachability(ctx, udpService, execPod) framework.ExpectNoError(err) ginkgo.By("hitting the UDP service's NodePort") @@ -436,7 +436,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { framework.Logf("Allocated static load balancer IP: %s", requestedIP) ginkgo.By("changing the UDP service to type=LoadBalancer") - _, err = udpJig.UpdateService(func(s *v1.Service) { + _, err = udpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeLoadBalancer }) framework.ExpectNoError(err) @@ -460,7 +460,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { var udpIngressIP string ginkgo.By("waiting for the UDP service to have a load balancer") // 2nd one should be faster since they ran in parallel. - udpService, err = udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout) + udpService, err = udpJig.WaitForLoadBalancer(ctx, loadBalancerCreateTimeout) framework.ExpectNoError(err) if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) @@ -468,7 +468,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) framework.Logf("UDP load balancer: %s", udpIngressIP) - err = udpJig.CheckServiceReachability(udpService, execPod) + err = udpJig.CheckServiceReachability(ctx, udpService, execPod) framework.ExpectNoError(err) ginkgo.By("hitting the UDP service's NodePort") @@ -480,7 +480,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // Change the services' node ports. 
ginkgo.By("changing the UDP service's NodePort") - udpService, err = udpJig.ChangeServiceNodePort(udpNodePort) + udpService, err = udpJig.ChangeServiceNodePort(ctx, udpNodePort) framework.ExpectNoError(err) udpNodePortOld := udpNodePort udpNodePort = int(udpService.Spec.Ports[0].NodePort) @@ -504,7 +504,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // Change the services' main ports. ginkgo.By("changing the UDP service's port") - udpService, err = udpJig.UpdateService(func(s *v1.Service) { + udpService, err = udpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Ports[0].Port++ }) framework.ExpectNoError(err) @@ -529,7 +529,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) ginkgo.By("Scaling the pods to 0") - err = udpJig.Scale(0) + err = udpJig.Scale(ctx, 0) framework.ExpectNoError(err) ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort") @@ -539,7 +539,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) ginkgo.By("Scaling the pods to 1") - err = udpJig.Scale(1) + err = udpJig.Scale(ctx, 1) framework.ExpectNoError(err) ginkgo.By("hitting the UDP service's NodePort") @@ -551,7 +551,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // Change the services back to ClusterIP. ginkgo.By("changing UDP service back to type=ClusterIP") - udpReadback, err := udpJig.UpdateService(func(s *v1.Service) { + udpReadback, err := udpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP }) framework.ExpectNoError(err) @@ -559,7 +559,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { framework.Fail("UDP Spec.Ports[0].NodePort was not cleared") } // Wait for the load balancer to be destroyed asynchronously - _, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout) + _, err = udpJig.WaitForLoadBalancerDestroy(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout) framework.ExpectNoError(err) ginkgo.By("checking the UDP NodePort is closed") @@ -573,7 +573,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // this feature currently supported only on GCE/GKE/AWS/AZURE e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "azure") - loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) + loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) namespace := f.Namespace.Name serviceName := "lb-sourcerange" @@ -582,22 +582,22 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ginkgo.By("Prepare allow source ips") // prepare the exec pods // acceptPod are allowed to access the loadbalancer - acceptPod := e2epod.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil) - dropPod := e2epod.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil) + acceptPod := e2epod.CreateExecPodOrFail(ctx, cs, namespace, "execpod-accept", nil) + dropPod := e2epod.CreateExecPodOrFail(ctx, cs, namespace, "execpod-drop", nil) ginkgo.By("creating a pod to be part of the service " + serviceName) // This container is an nginx container listening on port 80 // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response - _, err := jig.Run(nil) + _, err := jig.Run(ctx, nil) framework.ExpectNoError(err) // Make sure acceptPod is running. There are certain chances that pod might be terminated due to unexpected reasons. 
- acceptPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), acceptPod.Name, metav1.GetOptions{}) + acceptPod, err = cs.CoreV1().Pods(namespace).Get(ctx, acceptPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name) framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning) framework.ExpectNotEqual(acceptPod.Status.PodIP, "") // Create loadbalancer service with source range from node[0] and podAccept - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.LoadBalancerSourceRanges = []string{acceptPod.Status.PodIP + "/32"} }) @@ -605,10 +605,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By("Clean up loadbalancer service") - e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) + e2eservice.WaitForServiceDeletedWithFinalizer(ctx, cs, svc.Namespace, svc.Name) }) - svc, err = jig.WaitForLoadBalancer(loadBalancerCreateTimeout) + svc, err = jig.WaitForLoadBalancer(ctx, loadBalancerCreateTimeout) framework.ExpectNoError(err) ginkgo.By("check reachability from different sources") @@ -621,13 +621,13 @@ var _ = common.SIGDescribe("LoadBalancers", func() { checkReachabilityFromPod(false, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP) // Make sure dropPod is running. There are certain chances that the pod might be terminated due to unexpected reasons. - dropPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), dropPod.Name, metav1.GetOptions{}) + dropPod, err = cs.CoreV1().Pods(namespace).Get(ctx, dropPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s", dropPod.Name) framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning) framework.ExpectNotEqual(acceptPod.Status.PodIP, "") ginkgo.By("Update service LoadBalancerSourceRange and check reachability") - _, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { // only allow access from dropPod svc.Spec.LoadBalancerSourceRanges = []string{dropPod.Status.PodIP + "/32"} }) @@ -641,7 +641,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP) ginkgo.By("Delete LoadBalancerSourceRange field and check reachability") - _, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.LoadBalancerSourceRanges = nil }) framework.ExpectNoError(err) @@ -656,7 +656,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ginkgo.It("should be able to create an internal type load balancer [Slow]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce") - createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) + createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) pollInterval := framework.Poll * 10 namespace := f.Namespace.Name @@ -664,7 +664,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { jig := e2eservice.NewTestJig(cs, namespace, serviceName) ginkgo.By("creating pod to be part of service " + serviceName) - _, err := jig.Run(nil) + _, err := jig.Run(ctx, nil) framework.ExpectNoError(err) enableILB, disableILB := enableAndDisableInternalLB() @@ -680,7 +680,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } ginkgo.By("creating a service with type LoadBalancer and cloud specific 
Internal-LB annotation enabled") - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer enableILB(svc) }) @@ -688,10 +688,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By("Clean up loadbalancer service") - e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) + e2eservice.WaitForServiceDeletedWithFinalizer(ctx, cs, svc.Namespace, svc.Name) }) - svc, err = jig.WaitForLoadBalancer(createTimeout) + svc, err = jig.WaitForLoadBalancer(ctx, createTimeout) framework.ExpectNoError(err) lbIngress := &svc.Status.LoadBalancer.Ingress[0] svcPort := int(svc.Spec.Ports[0].Port) @@ -704,7 +704,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // a pod to test the service. ginkgo.By("hitting the internal load balancer from pod") framework.Logf("creating pod with host network") - hostExec := launchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec") + hostExec := launchHostExecPod(ctx, f.ClientSet, f.Namespace.Name, "ilb-host-exec") framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName) tcpIngressIP := e2eservice.GetIngressPoint(lbIngress) @@ -728,13 +728,13 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } ginkgo.By("switching to external type LoadBalancer") - svc, err = jig.UpdateService(func(svc *v1.Service) { + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { disableILB(svc) }) framework.ExpectNoError(err) framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { - svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) + svc, err := cs.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { return false, err } @@ -749,7 +749,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ginkgo.By("hitting the external load balancer") framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName) tcpIngressIP = e2eservice.GetIngressPoint(lbIngress) - e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault) + e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault) // GCE cannot test a specific IP because the test may not own it. This cloud specific condition // will be removed when GCP supports similar functionality. 
@@ -761,14 +761,14 @@ var _ = common.SIGDescribe("LoadBalancers", func() { internalStaticIP := netutils.AddIPOffset(base, int(offset)).String() - svc, err = jig.UpdateService(func(svc *v1.Service) { + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.LoadBalancerIP = internalStaticIP enableILB(svc) }) framework.ExpectNoError(err) framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { - svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) + svc, err := cs.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { return false, err } @@ -792,7 +792,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { e2eskipper.SkipUnlessProviderIs("gce") e2eskipper.SkipUnlessSSHKeyPresent() - clusterID, err := gce.GetClusterID(cs) + clusterID, err := gce.GetClusterID(ctx, cs) if err != nil { framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err) } @@ -807,17 +807,17 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ginkgo.By("create load balancer service") // Create loadbalancer service with source range from node[0] and podAccept - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer }) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By("Clean up loadbalancer service") - e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) + e2eservice.WaitForServiceDeletedWithFinalizer(ctx, cs, svc.Namespace, svc.Name) }) - svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) + svc, err = jig.WaitForLoadBalancer(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)) framework.ExpectNoError(err) hcName := gcecloud.MakeNodesHealthCheckName(clusterID) @@ -834,16 +834,16 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } ginkgo.By("restart kube-controller-manager") - if err := e2ekubesystem.RestartControllerManager(); err != nil { + if err := e2ekubesystem.RestartControllerManager(ctx); err != nil { framework.Failf("e2ekubesystem.RestartControllerManager() = %v; want nil", err) } - if err := e2ekubesystem.WaitForControllerManagerUp(); err != nil { + if err := e2ekubesystem.WaitForControllerManagerUp(ctx); err != nil { framework.Failf("e2ekubesystem.WaitForControllerManagerUp() = %v; want nil", err) } ginkgo.By("health check should be reconciled") pollInterval := framework.Poll * 10 - loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs) + loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs) if pollErr := wait.PollImmediate(pollInterval, loadBalancerPropagationTimeout, func() (bool, error) { hc, err := gceCloud.GetHTTPHealthCheck(hcName) if err != nil { @@ -865,7 +865,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { svc := getServeHostnameService("affinity-lb-esipp") svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - execAffinityTestForLBService(f, cs, svc) + execAffinityTestForLBService(ctx, f, cs, svc) }) // [LinuxOnly]: Windows does not support session affinity. 
@@ -876,7 +876,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { svc := getServeHostnameService("affinity-lb-esipp-transition") svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - execAffinityTestForLBServiceWithTransition(f, cs, svc) + execAffinityTestForLBServiceWithTransition(ctx, f, cs, svc) }) // [LinuxOnly]: Windows does not support session affinity. @@ -887,7 +887,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { svc := getServeHostnameService("affinity-lb") svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster - execAffinityTestForLBService(f, cs, svc) + execAffinityTestForLBService(ctx, f, cs, svc) }) // [LinuxOnly]: Windows does not support session affinity. @@ -898,7 +898,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { svc := getServeHostnameService("affinity-lb-transition") svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster - execAffinityTestForLBServiceWithTransition(f, cs, svc) + execAffinityTestForLBServiceWithTransition(ctx, f, cs, svc) }) // This test verifies if service load balancer cleanup finalizer is properly @@ -911,32 +911,32 @@ var _ = common.SIGDescribe("LoadBalancers", func() { jig := e2eservice.NewTestJig(cs, f.Namespace.Name, "lb-finalizer") ginkgo.By("Create load balancer service") - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer }) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By("Check that service can be deleted with finalizer") - e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) + e2eservice.WaitForServiceDeletedWithFinalizer(ctx, cs, svc.Namespace, svc.Name) }) ginkgo.By("Wait for load balancer to serve traffic") - svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) + svc, err = jig.WaitForLoadBalancer(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)) framework.ExpectNoError(err) ginkgo.By("Check if finalizer presents on service with type=LoadBalancer") - e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true) + e2eservice.WaitForServiceUpdatedWithFinalizer(ctx, cs, svc.Namespace, svc.Name, true) ginkgo.By("Check if finalizer is removed on service after changed to type=ClusterIP") - err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) + err = jig.ChangeServiceType(ctx, v1.ServiceTypeClusterIP, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)) framework.ExpectNoError(err) - e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, false) + e2eservice.WaitForServiceUpdatedWithFinalizer(ctx, cs, svc.Namespace, svc.Name, false) ginkgo.By("Check if finalizer is added back to service after changed to type=LoadBalancer") - err = jig.ChangeServiceType(v1.ServiceTypeLoadBalancer, e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) + err = jig.ChangeServiceType(ctx, v1.ServiceTypeLoadBalancer, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)) framework.ExpectNoError(err) - e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true) + e2eservice.WaitForServiceUpdatedWithFinalizer(ctx, cs, svc.Namespace, svc.Name, true) }) ginkgo.It("should be able to create LoadBalancer Service without NodePort 
and change it [Slow]", func(ctx context.Context) { @@ -947,7 +947,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { if framework.ProviderIs("aws") { loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS } - loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) + loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) // This test is more monolithic than we'd like because LB turnup can be // very slow, so we lumped all the tests into one LB lifecycle. @@ -956,19 +956,19 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ns1 := f.Namespace.Name // LB1 in ns1 on TCP framework.Logf("namespace for TCP test: %s", ns1) - nodeIP, err := e2enode.PickIP(cs) // for later + nodeIP, err := e2enode.PickIP(ctx, cs) // for later framework.ExpectNoError(err) ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName) - tcpService, err := tcpJig.CreateTCPService(nil) + tcpService, err := tcpJig.CreateTCPService(ctx, nil) framework.ExpectNoError(err) svcPort := int(tcpService.Spec.Ports[0].Port) framework.Logf("service port TCP: %d", svcPort) ginkgo.By("creating a pod to be part of the TCP service " + serviceName) - _, err = tcpJig.Run(nil) + _, err = tcpJig.Run(ctx, nil) framework.ExpectNoError(err) // Change the services to LoadBalancer. @@ -1001,7 +1001,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } ginkgo.By("changing the TCP service to type=LoadBalancer") - _, err = tcpJig.UpdateService(func(s *v1.Service) { + _, err = tcpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable s.Spec.Type = v1.ServiceTypeLoadBalancer s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(false) @@ -1010,7 +1010,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { ginkgo.By("waiting for the TCP service to have a load balancer") // Wait for the load balancer to be created asynchronously - tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout) + tcpService, err = tcpJig.WaitForLoadBalancer(ctx, loadBalancerCreateTimeout) framework.ExpectNoError(err) if int(tcpService.Spec.Ports[0].NodePort) != 0 { framework.Failf("TCP Spec.Ports[0].NodePort allocated %d when not expected", tcpService.Spec.Ports[0].NodePort) @@ -1040,12 +1040,12 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } ginkgo.By("hitting the TCP service's LoadBalancer") - e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) + e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout) // Change the services' node ports. 
ginkgo.By("adding a TCP service's NodePort") - tcpService, err = tcpJig.UpdateService(func(s *v1.Service) { + tcpService, err = tcpJig.UpdateService(ctx, func(s *v1.Service) { s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(true) }) framework.ExpectNoError(err) @@ -1059,17 +1059,17 @@ var _ = common.SIGDescribe("LoadBalancers", func() { framework.Logf("TCP node port: %d", tcpNodePort) ginkgo.By("hitting the TCP service's new NodePort") - e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) + e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") - e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) + e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout) }) ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes", func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "azure") ns := f.Namespace.Name - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) if len(nodes.Items) < 2 { e2eskipper.Skipf( @@ -1078,12 +1078,12 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault - loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) + loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) // Create a LoadBalancer service udpJig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating a UDP service " + serviceName + " with type=LoadBalancer in " + ns) - _, err = udpJig.CreateUDPService(func(svc *v1.Service) { + _, err = udpJig.CreateUDPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(80)}, @@ -1093,7 +1093,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { var udpIngressIP string ginkgo.By("waiting for the UDP service to have a load balancer") - udpService, err := udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout) + udpService, err := udpJig.WaitForLoadBalancer(ctx, loadBalancerCreateTimeout) framework.ExpectNoError(err) udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) @@ -1152,9 +1152,9 @@ var _ = common.SIGDescribe("LoadBalancers", func() { serverPod1.Spec.Hostname = "hostname1" nodeSelection := e2epod.NodeSelection{Name: nodes.Items[0].Name} e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection) - e2epod.NewPodClient(f).CreateSync(serverPod1) + e2epod.NewPodClient(f).CreateSync(ctx, serverPod1) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}}) // Note that the fact that Endpoints object already exists, does NOT mean // that iptables (or whatever else is used) was already programmed. 
@@ -1177,13 +1177,13 @@ var _ = common.SIGDescribe("LoadBalancers", func() { serverPod2.Spec.Hostname = "hostname2" nodeSelection = e2epod.NodeSelection{Name: nodes.Items[1].Name} e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection) - e2epod.NewPodClient(f).CreateSync(serverPod2) + e2epod.NewPodClient(f).CreateSync(ctx, serverPod2) // and delete the first pod framework.Logf("Cleaning up %s pod", podBackend1) - e2epod.NewPodClient(f).DeleteSync(podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend2: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}}) // Check that the second pod keeps receiving traffic // UDP conntrack entries timeout is 30 sec by default @@ -1201,7 +1201,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "azure") ns := f.Namespace.Name - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 1) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 1) framework.ExpectNoError(err) if len(nodes.Items) < 1 { e2eskipper.Skipf( @@ -1210,12 +1210,12 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault - loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) + loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) // Create a LoadBalancer service udpJig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating a UDP service " + serviceName + " with type=LoadBalancer in " + ns) - _, err = udpJig.CreateUDPService(func(svc *v1.Service) { + _, err = udpJig.CreateUDPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(80)}, @@ -1225,7 +1225,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { var udpIngressIP string ginkgo.By("waiting for the UDP service to have a load balancer") - udpService, err := udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout) + udpService, err := udpJig.WaitForLoadBalancer(ctx, loadBalancerCreateTimeout) framework.ExpectNoError(err) udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) @@ -1284,9 +1284,9 @@ var _ = common.SIGDescribe("LoadBalancers", func() { serverPod1.Spec.Hostname = "hostname1" nodeSelection := e2epod.NodeSelection{Name: nodes.Items[0].Name} e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection) - e2epod.NewPodClient(f).CreateSync(serverPod1) + e2epod.NewPodClient(f).CreateSync(ctx, serverPod1) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}}) // Note that the fact that Endpoints object already exists, does NOT mean // that iptables (or whatever else is used) was already programmed. 
@@ -1309,13 +1309,13 @@ var _ = common.SIGDescribe("LoadBalancers", func() { serverPod2.Spec.Hostname = "hostname2" // use the same node as previous pod e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection) - e2epod.NewPodClient(f).CreateSync(serverPod2) + e2epod.NewPodClient(f).CreateSync(ctx, serverPod2) // and delete the first pod framework.Logf("Cleaning up %s pod", podBackend1) - e2epod.NewPodClient(f).DeleteSync(podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend2: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}}) // Check that the second pod keeps receiving traffic // UDP conntrack entries timeout is 30 sec by default @@ -1329,22 +1329,22 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } }) - ginkgo.It("should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster [Slow]", func() { + ginkgo.It("should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster [Slow]", func(ctx context.Context) { // We start with a low but reasonable threshold to analyze the results. // The goal is to achieve 99% minimum success rate. // TODO: We should do incremental steps toward the goal. minSuccessRate := 0.95 - testRollingUpdateLBConnectivityDisruption(f, v1.ServiceExternalTrafficPolicyTypeCluster, minSuccessRate) + testRollingUpdateLBConnectivityDisruption(ctx, f, v1.ServiceExternalTrafficPolicyTypeCluster, minSuccessRate) }) - ginkgo.It("should not have connectivity disruption during rolling update with externalTrafficPolicy=Local [Slow]", func() { + ginkgo.It("should not have connectivity disruption during rolling update with externalTrafficPolicy=Local [Slow]", func(ctx context.Context) { // We start with a low but reasonable threshold to analyze the results. // The goal is to achieve 99% minimum success rate. // TODO: We should do incremental steps toward the goal. 
minSuccessRate := 0.95 - testRollingUpdateLBConnectivityDisruption(f, v1.ServiceExternalTrafficPolicyTypeLocal, minSuccessRate) + testRollingUpdateLBConnectivityDisruption(ctx, f, v1.ServiceExternalTrafficPolicyTypeLocal, minSuccessRate) }) }) @@ -1357,17 +1357,17 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { var subnetPrefix *net.IPNet var err error - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // requires cloud load-balancer support - this feature currently supported only on GCE/GKE e2eskipper.SkipUnlessProviderIs("gce", "gke") cs = f.ClientSet - loadBalancerCreateTimeout = e2eservice.GetServiceLoadBalancerCreationTimeout(cs) - subnetPrefix, err = getSubnetPrefix(cs) + loadBalancerCreateTimeout = e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) + subnetPrefix, err = getSubnetPrefix(ctx, cs) framework.ExpectNoError(err) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() { DescribeSvc(f.Namespace.Name) } @@ -1378,23 +1378,23 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { serviceName := "external-local-lb" jig := e2eservice.NewTestJig(cs, namespace, serviceName) - svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil) + svc, err := jig.CreateOnlyLocalLoadBalancerService(ctx, loadBalancerCreateTimeout, true, nil) framework.ExpectNoError(err) healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) if healthCheckNodePort == 0 { framework.Failf("Service HealthCheck NodePort was not allocated") } ginkgo.DeferCleanup(func(ctx context.Context) { - err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) + err = jig.ChangeServiceType(ctx, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) // Make sure we didn't leak the health check node port. 
const threshold = 2 - nodes, err := getEndpointNodesWithInternalIP(jig) + nodes, err := getEndpointNodesWithInternalIP(ctx, jig) framework.ExpectNoError(err) - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) for _, internalIP := range nodes { - err := testHTTPHealthCheckNodePortFromTestContainer( + err := testHTTPHealthCheckNodePortFromTestContainer(ctx, config, internalIP, healthCheckNodePort, @@ -1435,7 +1435,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { serviceName := "external-local-nodeport" jig := e2eservice.NewTestJig(cs, namespace, serviceName) - svc, err := jig.CreateOnlyLocalNodePortService(true) + svc, err := jig.CreateOnlyLocalNodePortService(ctx, true) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}) @@ -1444,15 +1444,15 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { tcpNodePort := int(svc.Spec.Ports[0].NodePort) - endpointsNodeMap, err := getEndpointNodesWithInternalIP(jig) + endpointsNodeMap, err := getEndpointNodesWithInternalIP(ctx, jig) framework.ExpectNoError(err) dialCmd := "clientip" - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) for nodeName, nodeIP := range endpointsNodeMap { ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v:%v/%v", nodeName, nodeIP, tcpNodePort, dialCmd)) - clientIP, err := GetHTTPContentFromTestContainer(config, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, dialCmd) + clientIP, err := GetHTTPContentFromTestContainer(ctx, config, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, dialCmd) framework.ExpectNoError(err) framework.Logf("ClientIP detected by target pod using NodePort is %s, the ip of test container is %s", clientIP, config.TestContainerPod.Status.PodIP) // the clientIP returned by agnhost contains port @@ -1466,10 +1466,10 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { namespace := f.Namespace.Name serviceName := "external-local-nodes" jig := e2eservice.NewTestJig(cs, namespace, serviceName) - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, e2eservice.MaxNodesForEndpointsTests) framework.ExpectNoError(err) - svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, false, + svc, err := jig.CreateOnlyLocalLoadBalancerService(ctx, loadBalancerCreateTimeout, false, func(svc *v1.Service) { // Change service port to avoid collision with opened hostPorts // in other tests that run in parallel. 
@@ -1481,9 +1481,9 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { }) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { - err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) + err = jig.ChangeServiceType(ctx, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) + err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -1498,12 +1498,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { svcTCPPort := int(svc.Spec.Ports[0].Port) const threshold = 2 - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) for i := 0; i < len(nodes.Items); i++ { endpointNodeName := nodes.Items[i].Name ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName) - _, err = jig.Run(func(rc *v1.ReplicationController) { + _, err = jig.Run(ctx, func(rc *v1.ReplicationController) { rc.Name = serviceName if endpointNodeName != "" { rc.Spec.Template.Spec.NodeName = endpointNodeName @@ -1512,7 +1512,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName)) - err = jig.WaitForEndpointOnNode(endpointNodeName) + err = jig.WaitForEndpointOnNode(ctx, endpointNodeName) framework.ExpectNoError(err) // HealthCheck should pass only on the node where num(endpoints) > 0 @@ -1520,12 +1520,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { for n, internalIP := range ips { // Make sure the loadbalancer picked up the health check change. // Confirm traffic can reach backend through LB before checking healthcheck nodeport. 
- e2eservice.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout) + e2eservice.TestReachableHTTP(ctx, ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout) expectedSuccess := nodes.Items[n].Name == endpointNodeName port := strconv.Itoa(healthCheckNodePort) ipPort := net.JoinHostPort(internalIP, port) framework.Logf("Health checking %s, http://%s/healthz, expectedSuccess %v", nodes.Items[n].Name, ipPort, expectedSuccess) - err := testHTTPHealthCheckNodePortFromTestContainer( + err := testHTTPHealthCheckNodePortFromTestContainer(ctx, config, internalIP, healthCheckNodePort, @@ -1534,7 +1534,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { threshold) framework.ExpectNoError(err) } - framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName)) + framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, namespace, serviceName)) } }) @@ -1544,12 +1544,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { serviceName := "external-local-pods" jig := e2eservice.NewTestJig(cs, namespace, serviceName) - svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil) + svc, err := jig.CreateOnlyLocalLoadBalancerService(ctx, loadBalancerCreateTimeout, true, nil) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { - err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) + err = jig.ChangeServiceType(ctx, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) + err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -1559,21 +1559,21 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { path := fmt.Sprintf("%s/clientip", ipPort) ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state") - deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1) + deployment := createPausePodDeployment(ctx, cs, "pause-pod-deployment", namespace, 1) framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Deleting deployment") - err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) + err = cs.AppsV1().Deployments(namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) }) - deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) + deployment, err = cs.AppsV1().Deployments(namespace).Get(ctx, deployment.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error in retrieving pause pod deployment") labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err, "Error in setting LabelSelector as selector from deployment") - pausePods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) + pausePods, err := cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector.String()}) framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments") pausePod := pausePods.Items[0] @@ -1581,7 +1581,7 @@ var _ = 
common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path) var srcIP string - loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs) + loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs) ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName)) if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) { stdout, err := e2eoutput.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd) @@ -1601,18 +1601,18 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { serviceName := "external-local-update" jig := e2eservice.NewTestJig(cs, namespace, serviceName) - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, e2eservice.MaxNodesForEndpointsTests) framework.ExpectNoError(err) if len(nodes.Items) < 2 { framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint") } - svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil) + svc, err := jig.CreateOnlyLocalLoadBalancerService(ctx, loadBalancerCreateTimeout, true, nil) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { - err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) + err = jig.ChangeServiceType(ctx, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) + err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -1620,7 +1620,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) ginkgo.By("turning ESIPP off") - svc, err = jig.UpdateService(func(svc *v1.Service) { + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster }) framework.ExpectNoError(err) @@ -1628,7 +1628,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { framework.Failf("Service HealthCheck NodePort still present") } - epNodes, err := jig.ListNodesWithEndpoint() + epNodes, err := jig.ListNodesWithEndpoint(ctx) framework.ExpectNoError(err) // map from name of nodes with endpoint to internal ip // it is assumed that there is only a single node with the endpoint @@ -1660,12 +1660,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { path := "/clientip" dialCmd := "clientip" - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap)) for nodeName, nodeIP := range noEndpointNodeMap { ginkgo.By(fmt.Sprintf("Checking %v (%v:%v/%v) proxies to endpoints on another node", nodeName, nodeIP[0], svcNodePort, dialCmd)) - _, err := GetHTTPContentFromTestContainer(config, nodeIP, svcNodePort, e2eservice.KubeProxyLagTimeout, dialCmd) + _, err := GetHTTPContentFromTestContainer(ctx, config, nodeIP, svcNodePort, e2eservice.KubeProxyLagTimeout, dialCmd) framework.ExpectNoError(err, "Could not reach HTTP service through %v:%v/%v after %v", nodeIP, svcNodePort, dialCmd, e2eservice.KubeProxyLagTimeout) 
} @@ -1674,7 +1674,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { var body string pollFn := func() (bool, error) { // we expect connection failure here, but not other errors - resp, err := config.GetResponseFromTestContainer( + resp, err := config.GetResponseFromTestContainer(ctx, "http", "healthz", nodeIP, @@ -1731,13 +1731,13 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { // creation will fail. ginkgo.By("setting ExternalTraffic field back to OnlyLocal") - svc, err = jig.UpdateService(func(svc *v1.Service) { + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal // Request the same healthCheckNodePort as before, to test the user-requested allocation path svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort) }) framework.ExpectNoError(err) - loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs) + loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs) pollErr = wait.PollImmediate(framework.PollShortTimeout, loadBalancerPropagationTimeout, func() (bool, error) { clientIPPort, err := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path) if err != nil { @@ -1766,7 +1766,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { }) }) -func testRollingUpdateLBConnectivityDisruption(f *framework.Framework, externalTrafficPolicy v1.ServiceExternalTrafficPolicyType, minSuccessRate float64) { +func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework.Framework, externalTrafficPolicy v1.ServiceExternalTrafficPolicyType, minSuccessRate float64) { cs := f.ClientSet ns := f.Namespace.Name name := "test-lb-rolling-update" @@ -1788,7 +1788,7 @@ func testRollingUpdateLBConnectivityDisruption(f *framework.Framework, externalT ds.Spec.Template.Labels = labels ds.Spec.Template.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(gracePeriod) - nodeNames := e2edaemonset.SchedulableNodes(cs, ds) + nodeNames := e2edaemonset.SchedulableNodes(ctx, cs, ds) e2eskipper.SkipUnlessAtLeast(len(nodeNames), 2, "load-balancer rolling update test requires at least 2 schedulable nodes for the DaemonSet") ginkgo.By(fmt.Sprintf("Creating DaemonSet %q", name)) @@ -1796,16 +1796,16 @@ func testRollingUpdateLBConnectivityDisruption(f *framework.Framework, externalT framework.ExpectNoError(err) ginkgo.By("Checking that daemon pods launch on every schedulable node of the cluster") - creationTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) - err = wait.PollImmediate(framework.Poll, creationTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, nodeNames)) + creationTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) + err = wait.PollImmediateWithContext(ctx, framework.Poll, creationTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, nodeNames)) framework.ExpectNoError(err, "error waiting for daemon pods to start") - err = e2edaemonset.CheckDaemonStatus(f, name) + err = e2edaemonset.CheckDaemonStatus(ctx, f, name) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Creating a service %s with type=LoadBalancer externalTrafficPolicy=%s in namespace %s", name, externalTrafficPolicy, ns)) jig := e2eservice.NewTestJig(cs, ns, name) jig.Labels = labels - service, err := jig.CreateLoadBalancerService(creationTimeout, func(svc *v1.Service) { + service, err := jig.CreateLoadBalancerService(ctx, creationTimeout, func(svc *v1.Service) { 
svc.Spec.ExternalTrafficPolicy = externalTrafficPolicy }) framework.ExpectNoError(err) @@ -1818,7 +1818,7 @@ func testRollingUpdateLBConnectivityDisruption(f *framework.Framework, externalT if framework.ProviderIs("aws") { timeout = e2eservice.LoadBalancerLagTimeoutAWS } - e2eservice.TestReachableHTTP(lbNameOrAddress, svcPort, timeout) + e2eservice.TestReachableHTTP(ctx, lbNameOrAddress, svcPort, timeout) ginkgo.By("Starting a goroutine to continuously hit the DaemonSet's pods through the service's load balancer") var totalRequests uint64 = 0 @@ -1937,19 +1937,19 @@ func testRollingUpdateLBConnectivityDisruption(f *framework.Framework, externalT } // assert that the load balancer address is still reachable after the rolling updates are finished - e2eservice.TestReachableHTTP(lbNameOrAddress, svcPort, timeout) + e2eservice.TestReachableHTTP(ctx, lbNameOrAddress, svcPort, timeout) } // getRandomNodeIP gets an IP address from a random worker node. // These tests exercise traffic coming from outside the cluster, // so it prefers ExternalIPs over InternalIPs. -func getRandomNodeIP(cs clientset.Interface) (string, error) { +func getRandomNodeIP(ctx context.Context, cs clientset.Interface) (string, error) { family := v1.IPv4Protocol if framework.TestContext.ClusterIsIPv6() { family = v1.IPv6Protocol } - node, err := e2enode.GetRandomReadySchedulableNode(cs) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs) if err != nil { return "", err } diff --git a/test/e2e/network/netpol/kubemanager.go b/test/e2e/network/netpol/kubemanager.go index 35f23fdade3..f7ee9956e86 100644 --- a/test/e2e/network/netpol/kubemanager.go +++ b/test/e2e/network/netpol/kubemanager.go @@ -80,11 +80,11 @@ func newKubeManager(framework *framework.Framework, dnsDomain string) *kubeManag } // initializeCluster initialized the cluster, creating namespaces pods and services as needed. -func (k *kubeManager) initializeClusterFromModel(model *Model) error { +func (k *kubeManager) initializeClusterFromModel(ctx context.Context, model *Model) error { var createdPods []*v1.Pod for _, ns := range model.Namespaces { // no labels needed, we just need the default kubernetes.io/metadata.name label - namespace, err := k.framework.CreateNamespace(ns.BaseName, nil) + namespace, err := k.framework.CreateNamespace(ctx, ns.BaseName, nil) if err != nil { return err } @@ -96,13 +96,13 @@ func (k *kubeManager) initializeClusterFromModel(model *Model) error { // note that we defer the logic of pod (i.e. node selector) specifics to the model // which is aware of linux vs windows pods - kubePod, err := k.createPod(pod.KubePod(namespaceName)) + kubePod, err := k.createPod(ctx, pod.KubePod(namespaceName)) if err != nil { return err } createdPods = append(createdPods, kubePod) - svc, err := k.createService(pod.Service(namespaceName)) + svc, err := k.createService(ctx, pod.Service(namespaceName)) if err != nil { return err } @@ -121,7 +121,7 @@ func (k *kubeManager) initializeClusterFromModel(model *Model) error { } for _, createdPod := range createdPods { - err := e2epod.WaitForPodRunningInNamespace(k.clientSet, createdPod) + err := e2epod.WaitForPodRunningInNamespace(ctx, k.clientSet, createdPod) if err != nil { return fmt.Errorf("unable to wait for pod %s/%s: %w", createdPod.Namespace, createdPod.Name, err) } @@ -147,8 +147,8 @@ func (k *kubeManager) NamespaceNames() []string { } // getPod gets a pod by namespace and name. 
-func (k *kubeManager) getPod(ns string, name string) (*v1.Pod, error) { - kubePod, err := k.clientSet.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) +func (k *kubeManager) getPod(ctx context.Context, ns string, name string) (*v1.Pod, error) { + kubePod, err := k.clientSet.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("unable to get pod %s/%s: %w", ns, name, err) } @@ -204,11 +204,11 @@ func (k *kubeManager) executeRemoteCommand(namespace string, pod string, contain } // createService is a convenience function for service setup. -func (k *kubeManager) createService(service *v1.Service) (*v1.Service, error) { +func (k *kubeManager) createService(ctx context.Context, service *v1.Service) (*v1.Service, error) { ns := service.Namespace name := service.Name - createdService, err := k.clientSet.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{}) + createdService, err := k.clientSet.CoreV1().Services(ns).Create(ctx, service, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("unable to create service %s/%s: %w", ns, name, err) } @@ -216,11 +216,11 @@ func (k *kubeManager) createService(service *v1.Service) (*v1.Service, error) { } // createPod is a convenience function for pod setup. -func (k *kubeManager) createPod(pod *v1.Pod) (*v1.Pod, error) { +func (k *kubeManager) createPod(ctx context.Context, pod *v1.Pod) (*v1.Pod, error) { ns := pod.Namespace framework.Logf("creating pod %s/%s", ns, pod.Name) - createdPod, err := k.clientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := k.clientSet.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("unable to create pod %s/%s: %w", ns, pod.Name, err) } @@ -228,16 +228,16 @@ func (k *kubeManager) createPod(pod *v1.Pod) (*v1.Pod, error) { } // cleanNetworkPolicies is a convenience function for deleting network policies before startup of any new test. -func (k *kubeManager) cleanNetworkPolicies() error { +func (k *kubeManager) cleanNetworkPolicies(ctx context.Context) error { for _, ns := range k.namespaceNames { framework.Logf("deleting policies in %s ..........", ns) - l, err := k.clientSet.NetworkingV1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{}) + l, err := k.clientSet.NetworkingV1().NetworkPolicies(ns).List(ctx, metav1.ListOptions{}) if err != nil { return fmt.Errorf("unable to list network policies in ns %s: %w", ns, err) } for _, np := range l.Items { framework.Logf("deleting network policy %s/%s", ns, np.Name) - err = k.clientSet.NetworkingV1().NetworkPolicies(ns).Delete(context.TODO(), np.Name, metav1.DeleteOptions{}) + err = k.clientSet.NetworkingV1().NetworkPolicies(ns).Delete(ctx, np.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("unable to delete network policy %s/%s: %w", ns, np.Name, err) } @@ -247,10 +247,10 @@ func (k *kubeManager) cleanNetworkPolicies() error { } // createNetworkPolicy is a convenience function for creating network policies. 
-func (k *kubeManager) createNetworkPolicy(ns string, netpol *networkingv1.NetworkPolicy) (*networkingv1.NetworkPolicy, error) { +func (k *kubeManager) createNetworkPolicy(ctx context.Context, ns string, netpol *networkingv1.NetworkPolicy) (*networkingv1.NetworkPolicy, error) { framework.Logf("creating network policy %s/%s", ns, netpol.Name) netpol.ObjectMeta.Namespace = ns - np, err := k.clientSet.NetworkingV1().NetworkPolicies(ns).Create(context.TODO(), netpol, metav1.CreateOptions{}) + np, err := k.clientSet.NetworkingV1().NetworkPolicies(ns).Create(ctx, netpol, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("unable to create network policy %s/%s: %w", ns, netpol.Name, err) } @@ -258,10 +258,10 @@ func (k *kubeManager) createNetworkPolicy(ns string, netpol *networkingv1.Networ } // updateNetworkPolicy is a convenience function for updating network policies. -func (k *kubeManager) updateNetworkPolicy(ns string, netpol *networkingv1.NetworkPolicy) (*networkingv1.NetworkPolicy, error) { +func (k *kubeManager) updateNetworkPolicy(ctx context.Context, ns string, netpol *networkingv1.NetworkPolicy) (*networkingv1.NetworkPolicy, error) { framework.Logf("updating network policy %s/%s", ns, netpol.Name) netpol.ObjectMeta.Namespace = ns - np, err := k.clientSet.NetworkingV1().NetworkPolicies(ns).Update(context.TODO(), netpol, metav1.UpdateOptions{}) + np, err := k.clientSet.NetworkingV1().NetworkPolicies(ns).Update(ctx, netpol, metav1.UpdateOptions{}) if err != nil { return np, fmt.Errorf("unable to update network policy %s/%s: %w", ns, netpol.Name, err) } @@ -269,8 +269,8 @@ func (k *kubeManager) updateNetworkPolicy(ns string, netpol *networkingv1.Networ } // getNamespace gets a namespace object from kubernetes. -func (k *kubeManager) getNamespace(ns string) (*v1.Namespace, error) { - selectedNameSpace, err := k.clientSet.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{}) +func (k *kubeManager) getNamespace(ctx context.Context, ns string) (*v1.Namespace, error) { + selectedNameSpace, err := k.clientSet.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("unable to get namespace %s: %w", ns, err) } diff --git a/test/e2e/network/netpol/network_legacy.go b/test/e2e/network/netpol/network_legacy.go index 6702afb60ab..baee0c8c0e1 100644 --- a/test/e2e/network/netpol/network_legacy.go +++ b/test/e2e/network/netpol/network_legacy.go @@ -75,12 +75,12 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) ginkgo.Context("NetworkPolicy between server and client", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Creating a simple server that serves on port 80 and 81.") - podServer, service = createServerPodAndService(f, f.Namespace, "server", []protocolPort{{80, v1.ProtocolTCP}, {81, v1.ProtocolTCP}}) + podServer, service = createServerPodAndService(ctx, f, f.Namespace, "server", []protocolPort{{80, v1.ProtocolTCP}, {81, v1.ProtocolTCP}}) ginkgo.By("Waiting for pod ready", func() { - err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServer.Name, f.Namespace.Name, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podServer.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err) }) @@ -89,12 +89,12 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { // Create pods, which should be able to communicate with the server on port 80 and 81. 
ginkgo.By("Testing pods can connect to both ports when no policy is present.") - testCanConnect(f, f.Namespace, "client-can-connect-80", service, 80) - testCanConnect(f, f.Namespace, "client-can-connect-81", service, 81) + testCanConnect(ctx, f, f.Namespace, "client-can-connect-80", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-can-connect-81", service, 81) }) - ginkgo.AfterEach(func() { - cleanupServerPodAndService(f, podServer, service) + ginkgo.AfterEach(func(ctx context.Context) { + cleanupServerPodAndService(ctx, f, podServer, service) }) ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -108,33 +108,33 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) // Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server, // but should not be able to now that isolation is on. - testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80) + testCannotConnect(ctx, f, f.Namespace, "client-cannot-connect", service, 80) }) ginkgo.It("should support a 'default-deny-all' policy [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error occurred while creating namespace-b.") ginkgo.By("Creating a simple server in another namespace that serves on port 80 and 81.") - podB, serviceB := createServerPodAndService(f, nsB, "pod-b", []protocolPort{{80, v1.ProtocolTCP}, {81, v1.ProtocolTCP}}) + podB, serviceB := createServerPodAndService(ctx, f, nsB, "pod-b", []protocolPort{{80, v1.ProtocolTCP}, {81, v1.ProtocolTCP}}) ginkgo.By("Waiting for pod ready", func() { - err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podB.Name, nsB.Name, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podB.Name, nsB.Name, framework.PodStartTimeout) framework.ExpectNoError(err) }) ginkgo.By("Creating client-a, which should be able to contact the server in another namespace.", func() { - testCanConnect(f, nsA, "client-a", serviceB, 80) + testCanConnect(ctx, f, nsA, "client-a", serviceB, 80) }) policy := &networkingv1.NetworkPolicy{ @@ -149,36 +149,36 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Creating client-to-a, which should not be able to contact the server in the same namespace, Ingress check.", func() { - testCannotConnect(f, nsA, "client-to-a", service, 80) + testCannotConnect(ctx, f, nsA, "client-to-a", service, 80) }) ginkgo.By("Creating client-to-b, which should not be able to contact the server in another namespace, Egress check.", func() { - testCannotConnect(f, nsA, "client-to-b", 
serviceB, 80) + testCannotConnect(ctx, f, nsA, "client-to-b", serviceB, 80) }) }) ginkgo.It("should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error occurred while creating namespace-b.") // All communication should be possible before applying the policy. ginkgo.By("Creating client-a, in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsA, "client-a", service, 80) + testCanConnect(ctx, f, nsA, "client-a", service, 80) }) ginkgo.By("Creating client-b, in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsA, "client-b", service, 80) + testCanConnect(ctx, f, nsA, "client-b", service, 80) }) ginkgo.By("Creating client-a, not in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsB, "client-a", service, 80) + testCanConnect(ctx, f, nsB, "client-a", service, 80) }) ginkgo.By("Creating a network policy for the server which allows traffic from the pod 'client-a' in same namespace.") @@ -206,32 +206,32 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Creating client-a, in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsA, "client-a", service, 80) + testCanConnect(ctx, f, nsA, "client-a", service, 80) }) ginkgo.By("Creating client-b, in server's namespace, which should not be able to contact the server.", func() { - testCannotConnect(f, nsA, "client-b", service, 80) + testCannotConnect(ctx, f, nsA, "client-b", service, 80) }) ginkgo.By("Creating client-a, not in server's namespace, which should not be able to contact the server.", func() { - testCannotConnect(f, nsB, "client-a", service, 80) + testCannotConnect(ctx, f, nsB, "client-a", service, 80) }) }) ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err) // Create Server with Service in NS-B framework.Logf("Waiting for server to come up.") - err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer) + err = e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, podServer) framework.ExpectNoError(err) // Create Policy for that service that allows traffic only via namespace B @@ -259,12 +259,12 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }}, }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) 
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) - testCannotConnect(f, nsA, "client-a", service, 80) - testCanConnect(f, nsB, "client-b", service, 80) + testCannotConnect(ctx, f, nsA, "client-a", service, 80) + testCanConnect(ctx, f, nsB, "client-b", service, 80) }) ginkgo.It("should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { @@ -293,28 +293,28 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Creating client-b which should not be able to contact the server.", func() { - testCannotConnect(f, f.Namespace, "client-b", service, 80) + testCannotConnect(ctx, f, f.Namespace, "client-b", service, 80) }) }) ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error creating namespace %v: %v", nsBName, err) nsCName := f.BaseName + "-c" - nsC, err := f.CreateNamespace(nsCName, map[string]string{ + nsC, err := f.CreateNamespace(ctx, nsCName, map[string]string{ "ns-name": nsCName, }) framework.ExpectNoError(err, "Error creating namespace %v: %v", nsCName, err) @@ -345,18 +345,18 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) - testCannotConnect(f, nsC, "client-a", service, 80) - testCanConnect(f, nsB, "client-a", service, 80) + testCannotConnect(ctx, f, nsC, "client-a", service, 80) + testCanConnect(ctx, f, nsB, "client-a", service, 80) }) ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error creating namespace %v: %v", nsBName, err) @@ -391,19 +391,19 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) 
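
The `ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)` call sites can stay unchanged even though the helper now takes a context first: as I read Ginkgo v2, DeferCleanup supplies a cleanup context of its own when the registered function's first parameter is a context.Context (or SpecContext). A rough sketch of the resulting helper shape; this is not the exact body of the cleanupNetworkPolicy defined elsewhere in this file.

	// Hypothetical shape of a ctx-aware cleanup helper; DeferCleanup passes ctx itself,
	// the remaining arguments come from the DeferCleanup call site.
	func cleanupNetworkPolicy(ctx context.Context, f *framework.Framework, policy *networkingv1.NetworkPolicy) {
		framework.Logf("Cleaning up the policy %q.", policy.Name)
		if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(ctx, policy.Name, metav1.DeleteOptions{}); err != nil {
			framework.Failf("unable to cleanup policy %v: %v", policy.Name, err)
		}
	}
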
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) - testCanConnect(f, nsB, "client-a", service, 80) - testCanConnect(f, nsA, "client-b", service, 80) - testCannotConnect(f, nsA, "client-c", service, 80) + testCanConnect(ctx, f, nsB, "client-a", service, 80) + testCanConnect(ctx, f, nsA, "client-b", service, 80) + testCannotConnect(ctx, f, nsA, "client-c", service, 80) }) ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error creating namespace %v: %v", nsBName, err) @@ -437,40 +437,40 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) - testCannotConnect(f, nsB, "client-a", service, 80) - testCannotConnect(f, nsA, "client-b", service, 80) - testCanConnect(f, nsB, "client-b", service, 80) + testCannotConnect(ctx, f, nsB, "client-a", service, 80) + testCannotConnect(ctx, f, nsA, "client-b", service, 80) + testCanConnect(ctx, f, nsB, "client-b", service, 80) }) ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error occurred while creating namespace-b.") // Wait for Server in namespaces-a to be ready framework.Logf("Waiting for server to come up.") - err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer) + err = e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, podServer) framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.") // Before application of the policy, all communication should be successful. 
ginkgo.By("Creating client-a, in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsA, "client-a", service, 80) + testCanConnect(ctx, f, nsA, "client-a", service, 80) }) ginkgo.By("Creating client-b, in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsA, "client-b", service, 80) + testCanConnect(ctx, f, nsA, "client-b", service, 80) }) ginkgo.By("Creating client-a, not in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsB, "client-a", service, 80) + testCanConnect(ctx, f, nsB, "client-a", service, 80) }) ginkgo.By("Creating client-b, not in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsB, "client-b", service, 80) + testCanConnect(ctx, f, nsB, "client-b", service, 80) }) ginkgo.By("Creating a network policy for the server which allows traffic only from client-a in namespace-b.") @@ -504,21 +504,21 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policy.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() { - testCannotConnect(f, nsA, "client-a", service, 80) + testCannotConnect(ctx, f, nsA, "client-a", service, 80) }) ginkgo.By("Creating client-b, in server's namespace, which should not be able to contact the server.", func() { - testCannotConnect(f, nsA, "client-b", service, 80) + testCannotConnect(ctx, f, nsA, "client-b", service, 80) }) ginkgo.By("Creating client-a, not in server's namespace, which should be able to contact the server.", func() { - testCanConnect(f, nsB, "client-a", service, 80) + testCanConnect(ctx, f, nsB, "client-a", service, 80) }) ginkgo.By("Creating client-b, not in server's namespace, which should not be able to contact the server.", func() { - testCannotConnect(f, nsB, "client-b", service, 80) + testCannotConnect(ctx, f, nsB, "client-b", service, 80) }) }) @@ -543,13 +543,13 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Testing pods can connect only to the port allowed by the policy.") - testCannotConnect(f, f.Namespace, "client-a", service, 80) - testCanConnect(f, f.Namespace, "client-b", service, 81) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-b", service, 81) }) ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -573,7 +573,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := 
f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) @@ -597,13 +597,13 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }}, }, } - policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy2, metav1.CreateOptions{}) + policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy2, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy2) ginkgo.By("Testing pods can connect to both ports when both policies are present.") - testCanConnect(f, f.Namespace, "client-a", service, 80) - testCanConnect(f, f.Namespace, "client-b", service, 81) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-b", service, 81) }) ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -620,13 +620,13 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { Ingress: []networkingv1.NetworkPolicyIngressRule{{}}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.") - testCanConnect(f, f.Namespace, "client-a", service, 80) - testCanConnect(f, f.Namespace, "client-b", service, 81) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-b", service, 81) }) ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -650,21 +650,21 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Creating client-b which should not be able to contact the server on port 81.", func() { - testCannotConnect(f, f.Namespace, "client-b", service, 81) + testCannotConnect(ctx, f, f.Namespace, "client-b", service, 81) }) }) ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error creating namespace %v: %v", nsBName, err) @@ -697,12 +697,12 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = 
f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) - testCannotConnect(f, f.Namespace, "client-a", service, allowedPort) - testCanConnect(f, nsB, "client-b", service, allowedPort) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, allowedPort) + testCanConnect(ctx, f, nsB, "client-b", service, allowedPort) }) ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -729,15 +729,15 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, clientPodName, service, 80) + testCanConnect(ctx, f, f.Namespace, clientPodName, service, 80) }) ginkgo.By("Creating client-a which should not be able to contact the server on port 81.", func() { - testCannotConnect(f, f.Namespace, clientPodName, service, 81) + testCannotConnect(ctx, f, f.Namespace, clientPodName, service, 81) }) }) @@ -773,19 +773,19 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) - testCanConnect(f, f.Namespace, "client-a", service, clientAAllowedPort) - e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, "client-a", f.Namespace.Name, f.Timeouts.PodDelete) + testCanConnect(ctx, f, f.Namespace, "client-a", service, clientAAllowedPort) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "client-a", f.Namespace.Name, f.Timeouts.PodDelete) framework.ExpectNoError(err, "Expected pod to be not found.") - testCannotConnect(f, f.Namespace, "client-b", service, clientAAllowedPort) - e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, "client-b", f.Namespace.Name, f.Timeouts.PodDelete) + testCannotConnect(ctx, f, f.Namespace, "client-b", service, clientAAllowedPort) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "client-b", f.Namespace.Name, f.Timeouts.PodDelete) framework.ExpectNoError(err, "Expected pod to be not found.") - testCannotConnect(f, f.Namespace, "client-a", service, clientANotAllowedPort) - e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, "client-a", f.Namespace.Name, f.Timeouts.PodDelete) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, clientANotAllowedPort) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "client-a", f.Namespace.Name, f.Timeouts.PodDelete) framework.ExpectNoError(err, "Expected pod to be not found.") const ( @@ -819,23 +819,23 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }}, }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(context.TODO(), policy, metav1.UpdateOptions{}) + policy, err 
= f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(ctx, policy, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Error updating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) - testCannotConnect(f, f.Namespace, "client-b", service, clientBNotAllowedPort) - e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, "client-b", f.Namespace.Name, f.Timeouts.PodDelete) + testCannotConnect(ctx, f, f.Namespace, "client-b", service, clientBNotAllowedPort) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "client-b", f.Namespace.Name, f.Timeouts.PodDelete) framework.ExpectNoError(err, "Expected pod to be not found.") - testCannotConnect(f, f.Namespace, "client-a", service, clientBNotAllowedPort) - testCanConnect(f, f.Namespace, "client-b", service, clientBAllowedPort) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, clientBNotAllowedPort) + testCanConnect(ctx, f, f.Namespace, "client-b", service, clientBAllowedPort) }) ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" newNsBName := nsBName + "-updated" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error creating namespace %v: %v", nsBName, err) @@ -865,20 +865,20 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) - testCannotConnect(f, nsB, "client-a", service, allowedPort) + testCannotConnect(ctx, f, nsB, "client-a", service, allowedPort) - nsB, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), nsB.Name, metav1.GetOptions{}) + nsB, err = f.ClientSet.CoreV1().Namespaces().Get(ctx, nsB.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error getting Namespace %v: %v", nsB.ObjectMeta.Name, err) nsB.ObjectMeta.Labels["ns-name"] = newNsBName - nsB, err = f.ClientSet.CoreV1().Namespaces().Update(context.TODO(), nsB, metav1.UpdateOptions{}) + nsB, err = f.ClientSet.CoreV1().Namespaces().Update(ctx, nsB, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Error updating Namespace %v: %v", nsB.ObjectMeta.Name, err) - testCanConnect(f, nsB, "client-b", service, allowedPort) + testCanConnect(ctx, f, nsB, "client-b", service, allowedPort) }) ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -907,26 +907,26 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", "client-a", service.Name)) // Specify RestartPolicy to 
OnFailure so we can check the client pod fails in the beginning and succeeds // after updating its label, otherwise it would not restart after the first failure. - podClient := createNetworkClientPodWithRestartPolicy(f, f.Namespace, "client-a", service, allowedPort, v1.ProtocolTCP, v1.RestartPolicyOnFailure) + podClient := createNetworkClientPodWithRestartPolicy(ctx, f, f.Namespace, "client-a", service, allowedPort, v1.ProtocolTCP, v1.RestartPolicyOnFailure) defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podClient.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() // Check Container exit code as restartable Pod's Phase will be Running even when container fails. - checkNoConnectivityByExitCode(f, f.Namespace, podClient, service) + checkNoConnectivityByExitCode(ctx, f, f.Namespace, podClient, service) ginkgo.By(fmt.Sprintf("Updating client pod %s that should successfully connect to %s.", podClient.Name, service.Name)) - podClient = updatePodLabel(f, f.Namespace, podClient.Name, "replace", "/metadata/labels", map[string]string{}) - checkConnectivity(f, f.Namespace, podClient, service) + podClient = updatePodLabel(ctx, f, f.Namespace, podClient.Name, "replace", "/metadata/labels", map[string]string{}) + checkConnectivity(ctx, f, f.Namespace, podClient, service) }) ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -951,17 +951,17 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) // Client can connect to service when the network policy doesn't apply to the server pod. - testCanConnect(f, f.Namespace, "client-a", service, allowedPort) + testCanConnect(ctx, f, f.Namespace, "client-a", service, allowedPort) // Client cannot connect to service after updating the server pod's labels to match the network policy's selector. 
ginkgo.By(fmt.Sprintf("Updating server pod %s to be selected by network policy %s.", podServer.Name, policy.Name)) - updatePodLabel(f, f.Namespace, podServer.Name, "add", "/metadata/labels/isolated", nil) - testCannotConnect(f, f.Namespace, "client-a", service, allowedPort) + updatePodLabel(ctx, f, f.Namespace, podServer.Name, "add", "/metadata/labels/isolated", nil) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, allowedPort) }) ginkgo.It("should work with Ingress,Egress specified together [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -969,17 +969,17 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { const notAllowedPort = 81 nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error occurred while creating namespace-b.") - podB, serviceB := createServerPodAndService(f, nsB, "pod-b", []protocolPort{{allowedPort, v1.ProtocolTCP}, {notAllowedPort, v1.ProtocolTCP}}) + podB, serviceB := createServerPodAndService(ctx, f, nsB, "pod-b", []protocolPort{{allowedPort, v1.ProtocolTCP}, {notAllowedPort, v1.ProtocolTCP}}) ginkgo.DeferCleanup(cleanupServerPodAndService, f, podB, serviceB) // Wait for Server with Service in NS-B to be ready framework.Logf("Waiting for servers to be ready.") - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podB.Name, nsB.Name, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podB.Name, nsB.Name, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Ready.") ginkgo.By("Create a network policy for the server which denies both Ingress and Egress traffic.") @@ -1020,24 +1020,24 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("client-a should be able to communicate with server port 80 in namespace-b", func() { - testCanConnect(f, f.Namespace, "client-a", serviceB, allowedPort) + testCanConnect(ctx, f, f.Namespace, "client-a", serviceB, allowedPort) }) ginkgo.By("client-b should be able to communicate with server port 80 in namespace-a", func() { - testCanConnect(f, nsB, "client-b", service, allowedPort) + testCanConnect(ctx, f, nsB, "client-b", service, allowedPort) }) ginkgo.By("client-a should not be able to communicate with server port 81 in namespace-b", func() { - testCannotConnect(f, f.Namespace, "client-a", serviceB, notAllowedPort) + testCannotConnect(ctx, f, f.Namespace, "client-a", serviceB, notAllowedPort) }) ginkgo.By("client-b should not be able to communicate with server port 81 in namespace-a", func() { - testCannotConnect(f, nsB, "client-b", service, notAllowedPort) + testCannotConnect(ctx, f, nsB, "client-b", service, notAllowedPort) }) }) @@ -1047,27 +1047,27 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ 
"ns-name": nsBName, }) framework.ExpectNoError(err, "Error occurred while creating namespace-b.") // Creating pods and services in namespace-b - nsBpodServerA, nsBserviceA = createServerPodAndService(f, nsB, "ns-b-server-a", []protocolPort{{80, v1.ProtocolTCP}}) + nsBpodServerA, nsBserviceA = createServerPodAndService(ctx, f, nsB, "ns-b-server-a", []protocolPort{{80, v1.ProtocolTCP}}) ginkgo.DeferCleanup(cleanupServerPodAndService, f, nsBpodServerA, nsBserviceA) - nsBpodServerB, nsBserviceB = createServerPodAndService(f, nsB, "ns-b-server-b", []protocolPort{{80, v1.ProtocolTCP}}) + nsBpodServerB, nsBserviceB = createServerPodAndService(ctx, f, nsB, "ns-b-server-b", []protocolPort{{80, v1.ProtocolTCP}}) ginkgo.DeferCleanup(cleanupServerPodAndService, f, nsBpodServerB, nsBserviceB) // Wait for Server with Service in NS-A to be ready framework.Logf("Waiting for servers to be ready.") - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServer.Name, podServer.Namespace, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podServer.Name, podServer.Namespace, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Ready.") // Wait for Servers with Services in NS-B to be ready - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, nsBpodServerA.Name, nsBpodServerA.Namespace, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, nsBpodServerA.Name, nsBpodServerA.Namespace, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Ready.") - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, nsBpodServerB.Name, nsBpodServerB.Namespace, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, nsBpodServerB.Name, nsBpodServerB.Namespace, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Ready.") ginkgo.By("Creating a network policy for the server which allows traffic only to a server in different namespace.") @@ -1106,18 +1106,18 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToServerInNSB, metav1.CreateOptions{}) + policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowToServerInNSB, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToServerInNSB.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowToServerInNSB) ginkgo.By("Creating client-a, in 'namespace-a', which should be able to contact the server-a in namespace-b.", func() { - testCanConnect(f, nsA, "client-a", nsBserviceA, 80) + testCanConnect(ctx, f, nsA, "client-a", nsBserviceA, 80) }) ginkgo.By("Creating client-a, in 'namespace-a', which should not be able to contact the server-b in namespace-b.", func() { - testCannotConnect(f, nsA, "client-a", nsBserviceB, 80) + testCannotConnect(ctx, f, nsA, "client-a", nsBserviceB, 80) }) ginkgo.By("Creating client-a, in 'namespace-a', which should not be able to contact the server in namespace-a.", func() { - testCannotConnect(f, nsA, "client-a", service, 80) + testCannotConnect(ctx, f, nsA, "client-a", service, 80) }) }) @@ -1149,15 +1149,15 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy 
[LinuxOnly]", func() { }, } - policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyFromClientB, metav1.CreateOptions{}) + policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowOnlyFromClientB, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyFromClientB.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowOnlyFromClientB) ginkgo.By("Creating client-a which should not be able to contact the server.", func() { - testCannotConnect(f, f.Namespace, "client-a", service, 80) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Creating client-b which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-b", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-b", service, 80) }) ginkgo.By("Creating a network policy for the server which allows traffic from all clients.") @@ -1176,29 +1176,29 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyIngressAllowAll, metav1.CreateOptions{}) + policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyIngressAllowAll, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyIngressAllowAll.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyIngressAllowAll) ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Creating client-b which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-b", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-b", service, 80) }) }) ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { - podServerB, serviceB := createServerPodAndService(f, f.Namespace, "server-b", []protocolPort{{80, v1.ProtocolTCP}}) + podServerB, serviceB := createServerPodAndService(ctx, f, f.Namespace, "server-b", []protocolPort{{80, v1.ProtocolTCP}}) ginkgo.DeferCleanup(cleanupServerPodAndService, f, podServerB, serviceB) ginkgo.By("Waiting for pod ready", func() { - err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.") }) ginkgo.By("Creating client-a which should be able to contact the server before applying policy.", func() { - testCanConnect(f, f.Namespace, "client-a", serviceB, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", serviceB, 80) }) ginkgo.By("Creating a network policy for the server which allows traffic only to server-a.") @@ -1231,15 +1231,15 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, }, } - policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyToServerA, metav1.CreateOptions{}) + policyAllowOnlyToServerA, err := 
f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowOnlyToServerA, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyToServerA.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowOnlyToServerA) ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() { - testCannotConnect(f, f.Namespace, "client-a", serviceB, 80) + testCannotConnect(ctx, f, f.Namespace, "client-a", serviceB, 80) }) ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Creating a network policy which allows traffic to all pods.") @@ -1257,15 +1257,15 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyEgressAllowAll, metav1.CreateOptions{}) + policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyEgressAllowAll, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyEgressAllowAll.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyEgressAllowAll) ginkgo.By("Creating client-a which should be able to contact the server-b.", func() { - testCanConnect(f, f.Namespace, "client-a", serviceB, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", serviceB, 80) }) ginkgo.By("Creating client-a which should be able to contact the server-a.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) }) @@ -1284,11 +1284,11 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyDenyAll, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyAll, metav1.CreateOptions{}) + policyDenyAll, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyDenyAll, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyAll.") ginkgo.By("Creating client-a which should not be able to contact the server.", func() { - testCannotConnect(f, f.Namespace, "client-a", service, 80) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Creating a network policy for the server which allows traffic only from client-a.") @@ -1318,25 +1318,25 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyAllowFromClientA, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowFromClientA, metav1.CreateOptions{}) + policyAllowFromClientA, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowFromClientA, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowFromClientA.") ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Deleting the network policy allowing traffic from client-a") - cleanupNetworkPolicy(f, policyAllowFromClientA) + cleanupNetworkPolicy(ctx, f, policyAllowFromClientA) ginkgo.By("Creating client-a which should not be able to contact the server.", func() { - 
testCannotConnect(f, f.Namespace, "client-a", service, 80) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Deleting the network policy denying all traffic.") - cleanupNetworkPolicy(f, policyDenyAll) + cleanupNetworkPolicy(ctx, f, policyDenyAll) ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) }) @@ -1345,7 +1345,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { var podServerB *v1.Pod // Getting podServer's status to get podServer's IP, to create the CIDR - podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) + podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, podServer.Name, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, "Error occurred while getting pod status.") } @@ -1356,19 +1356,19 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { podServerCIDR := fmt.Sprintf("%s/%d", podServerStatus.Status.PodIP, hostMask) // Creating pod-b and service-b - podServerB, serviceB = createServerPodAndService(f, f.Namespace, "pod-b", []protocolPort{{80, v1.ProtocolTCP}}) + podServerB, serviceB = createServerPodAndService(ctx, f, f.Namespace, "pod-b", []protocolPort{{80, v1.ProtocolTCP}}) ginkgo.By("Waiting for pod-b to be ready", func() { - err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.") }) ginkgo.DeferCleanup(cleanupServerPodAndService, f, podServerB, serviceB) // Wait for podServerB with serviceB to be ready - err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServerB) + err = e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, podServerB) framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.") ginkgo.By("Creating client-a which should be able to contact the server-b.", func() { - testCanConnect(f, f.Namespace, "client-a", serviceB, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", serviceB, 80) }) policyAllowCIDR := &networkingv1.NetworkPolicy{ @@ -1399,21 +1399,21 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDR, metav1.CreateOptions{}) + policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowCIDR, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDR.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDR) ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() { - testCannotConnect(f, f.Namespace, "client-a", serviceB, 80) + testCannotConnect(ctx, f, f.Namespace, "client-a", serviceB, 80) }) ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) }) ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func(ctx 
context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause - podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) + podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, podServer.Name, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, "Error occurred while getting pod status.") } @@ -1433,7 +1433,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { // client-a can connect to server prior to applying the NetworkPolicy ginkgo.By("Creating client-a which should be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) policyAllowCIDRWithExcept := &networkingv1.NetworkPolicy{ @@ -1465,18 +1465,18 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyAllowCIDRWithExcept, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExcept, metav1.CreateOptions{}) + policyAllowCIDRWithExcept, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowCIDRWithExcept, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExcept.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRWithExcept) ginkgo.By("Creating client-a which should no longer be able to contact the server.", func() { - testCannotConnect(f, f.Namespace, "client-a", service, 80) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, 80) }) }) ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause - podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) + podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, podServer.Name, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, "Error occurred while getting pod status.") } @@ -1525,11 +1525,11 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyAllowCIDRWithExceptServerPodObj, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExceptServerPod, metav1.CreateOptions{}) + policyAllowCIDRWithExceptServerPodObj, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowCIDRWithExceptServerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExceptServerPod.") ginkgo.By("Creating client-a which should not be able to contact the server.", func() { - testCannotConnect(f, f.Namespace, "client-a", service, 80) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, 80) }) // Create NetworkPolicy which allows access to the podServer using podServer's IP in allow CIDR. 
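
Once the wait helpers used in these hunks (WaitTimeoutForPodReadyInNamespace, WaitForPodRunningInNamespace and similar) take a ctx, their poll loops can stop as soon as that context is cancelled rather than running out the full timeout. A stdlib-only sketch of that shape, assuming `context` and `time` imports; it is not the actual e2epod implementation.

	// Minimal, hypothetical poll loop: returns as soon as cond succeeds, cond errors,
	// or ctx is cancelled (for example when the Ginkgo spec is interrupted).
	func pollUntil(ctx context.Context, interval time.Duration, cond func(ctx context.Context) (bool, error)) error {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			done, err := cond(ctx)
			if err != nil || done {
				return err
			}
			select {
			case <-ctx.Done():
				return ctx.Err() // spec aborted or timed out
			case <-ticker.C:
			}
		}
	}
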
@@ -1561,28 +1561,28 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyAllowCIDRServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRServerPod, metav1.CreateOptions{}) + policyAllowCIDRServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowCIDRServerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRServerPod.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRServerPod) ginkgo.By("Creating client-a which should now be able to contact the server.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) ginkgo.By("Deleting the network policy with except podServer IP which disallows access to podServer.") - cleanupNetworkPolicy(f, policyAllowCIDRWithExceptServerPodObj) + cleanupNetworkPolicy(ctx, f, policyAllowCIDRWithExceptServerPodObj) ginkgo.By("Creating client-a which should still be able to contact the server after deleting the network policy with except clause.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) // Recreate the NetworkPolicy which contains the podServer's IP in the except list. - policyAllowCIDRWithExceptServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExceptServerPod, metav1.CreateOptions{}) + policyAllowCIDRWithExceptServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowCIDRWithExceptServerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExceptServerPod.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRWithExceptServerPod) ginkgo.By("Creating client-a which should still be able to contact the server after recreating the network policy with except clause.", func() { - testCanConnect(f, f.Namespace, "client-a", service, 80) + testCanConnect(ctx, f, f.Namespace, "client-a", service, 80) }) }) @@ -1592,23 +1592,23 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { var err error // Before applying policy, communication should be successful between pod-a and pod-b - podA, serviceA = createServerPodAndService(f, f.Namespace, "pod-a", []protocolPort{{80, v1.ProtocolTCP}}) + podA, serviceA = createServerPodAndService(ctx, f, f.Namespace, "pod-a", []protocolPort{{80, v1.ProtocolTCP}}) ginkgo.By("Waiting for pod-a to be ready", func() { - err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podA.Name, f.Namespace.Name, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podA.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.") }) ginkgo.By("Creating client pod-b which should be able to contact the server pod-a.", func() { - testCanConnect(f, f.Namespace, "pod-b", serviceA, 80) + testCanConnect(ctx, f, f.Namespace, "pod-b", serviceA, 80) }) - cleanupServerPodAndService(f, podA, serviceA) + cleanupServerPodAndService(ctx, f, podA, serviceA) - podB, serviceB = createServerPodAndService(f, f.Namespace, "pod-b", []protocolPort{{80, v1.ProtocolTCP}}) + podB, serviceB = createServerPodAndService(ctx, f, f.Namespace, "pod-b", []protocolPort{{80, v1.ProtocolTCP}}) 
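
Setup and teardown nodes follow the same pattern as the specs. The following is condensed from the BeforeEach/AfterEach hunks elsewhere in this diff (for example the SCTP Context further down) using the same helper names; it is a restatement, not new behavior.

	// Both nodes accept the spec context, so pod creation, readiness polling
	// and cleanup are all bounded by it.
	ginkgo.BeforeEach(func(ctx context.Context) {
		podServer, service = createServerPodAndService(ctx, f, f.Namespace, "server", []protocolPort{{80, v1.ProtocolTCP}, {81, v1.ProtocolTCP}})
		err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podServer.Name, f.Namespace.Name, framework.PodStartTimeout)
		framework.ExpectNoError(err)
	})
	ginkgo.AfterEach(func(ctx context.Context) {
		cleanupServerPodAndService(ctx, f, podServer, service)
	})
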
ginkgo.By("Waiting for pod-b to be ready", func() { - err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podB.Name, f.Namespace.Name, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podB.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.") }) ginkgo.By("Creating client pod-a which should be able to contact the server pod-b.", func() { - testCanConnect(f, f.Namespace, "pod-a", serviceB, 80) + testCanConnect(ctx, f, f.Namespace, "pod-a", serviceB, 80) }) ginkgo.By("Creating a network policy for pod-a which allows Egress traffic to pod-b.") @@ -1642,7 +1642,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToPodB, metav1.CreateOptions{}) + policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyAllowToPodB, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToPodB.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowToPodB) @@ -1665,35 +1665,35 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }, } - policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyFromPodB, metav1.CreateOptions{}) + policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policyDenyFromPodB, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyFromPodB.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyDenyFromPodB) ginkgo.By("Creating client pod-a which should be able to contact the server pod-b.", func() { - testCanConnect(f, f.Namespace, "pod-a", serviceB, 80) + testCanConnect(ctx, f, f.Namespace, "pod-a", serviceB, 80) }) - cleanupServerPodAndService(f, podB, serviceB) + cleanupServerPodAndService(ctx, f, podB, serviceB) // Creating server pod with label "pod-name": "pod-a" to deny traffic from client pod with label "pod-name": "pod-b" - podA, serviceA = createServerPodAndService(f, f.Namespace, "pod-a", []protocolPort{{80, v1.ProtocolTCP}}) + podA, serviceA = createServerPodAndService(ctx, f, f.Namespace, "pod-a", []protocolPort{{80, v1.ProtocolTCP}}) ginkgo.By("Waiting for pod-a to be ready", func() { - err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podA.Name, f.Namespace.Name, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podA.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.") }) ginkgo.By("Creating client pod-b which should be able to contact the server pod-a.", func() { - testCannotConnect(f, f.Namespace, "pod-b", serviceA, 80) + testCannotConnect(ctx, f, f.Namespace, "pod-b", serviceA, 80) }) - cleanupServerPodAndService(f, podA, serviceA) + cleanupServerPodAndService(ctx, f, podA, serviceA) }) // This is [Serial] because it can't run at the same time as the // [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded. 
ginkgo.It("should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy] [Serial]", func(ctx context.Context) { ginkgo.By("getting the state of the sctp module on nodes") - nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) - sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes) + sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes) ginkgo.By("Creating a network policy for the server which allows traffic only via SCTP on port 80.") policy := &networkingv1.NetworkPolicy{ @@ -1716,15 +1716,15 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }}, }, } - appliedPolicy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + appliedPolicy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, appliedPolicy) ginkgo.By("Testing pods cannot connect on port 80 anymore when not using SCTP as protocol.") - testCannotConnect(f, f.Namespace, "client-a", service, 80) + testCannotConnect(ctx, f, f.Namespace, "client-a", service, 80) ginkgo.By("validating sctp module is still not loaded") - sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes) + sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes) if !sctpLoadedAtStart && sctpLoadedAtEnd { framework.Failf("The state of the sctp module has changed due to the test case") } @@ -1745,12 +1745,12 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly]" }) ginkgo.Context("NetworkPolicy between server and client using SCTP", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Creating a simple server that serves on port 80 and 81.") - podServer, service = createServerPodAndService(f, f.Namespace, "server", []protocolPort{{80, v1.ProtocolSCTP}, {81, v1.ProtocolSCTP}}) + podServer, service = createServerPodAndService(ctx, f, f.Namespace, "server", []protocolPort{{80, v1.ProtocolSCTP}, {81, v1.ProtocolSCTP}}) ginkgo.By("Waiting for pod ready", func() { - err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServer.Name, f.Namespace.Name, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, podServer.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(err) }) @@ -1759,12 +1759,12 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly]" // Create pods, which should be able to communicate with the server on port 80 and 81. 
ginkgo.By("Testing pods can connect to both ports when no policy is present.") - testCanConnectProtocol(f, f.Namespace, "client-can-connect-80", service, 80, v1.ProtocolSCTP) - testCanConnectProtocol(f, f.Namespace, "client-can-connect-81", service, 81, v1.ProtocolSCTP) + testCanConnectProtocol(ctx, f, f.Namespace, "client-can-connect-80", service, 80, v1.ProtocolSCTP) + testCanConnectProtocol(ctx, f, f.Namespace, "client-can-connect-81", service, 81, v1.ProtocolSCTP) }) - ginkgo.AfterEach(func() { - cleanupServerPodAndService(f, podServer, service) + ginkgo.AfterEach(func(ctx context.Context) { + cleanupServerPodAndService(ctx, f, podServer, service) }) ginkgo.It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -1778,13 +1778,13 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly]" }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) // Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server, // but should not be able to now that isolation is on. - testCannotConnectProtocol(f, f.Namespace, "client-cannot-connect", service, 80, v1.ProtocolSCTP) + testCannotConnectProtocol(ctx, f, f.Namespace, "client-cannot-connect", service, 80, v1.ProtocolSCTP) }) ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { @@ -1809,40 +1809,40 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly]" }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Testing pods can connect only to the port allowed by the policy.") - testCannotConnectProtocol(f, f.Namespace, "client-a", service, 80, v1.ProtocolSCTP) - testCanConnectProtocol(f, f.Namespace, "client-b", service, 81, v1.ProtocolSCTP) + testCannotConnectProtocol(ctx, f, f.Namespace, "client-a", service, 80, v1.ProtocolSCTP) + testCanConnectProtocol(ctx, f, f.Namespace, "client-b", service, 81, v1.ProtocolSCTP) }) ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" - nsB, err := f.CreateNamespace(nsBName, map[string]string{ + nsB, err := f.CreateNamespace(ctx, nsBName, map[string]string{ "ns-name": nsBName, }) framework.ExpectNoError(err, "Error occurred while creating namespace-b.") // Wait for Server in namespaces-a to be ready framework.Logf("Waiting for server to come up.") - err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer) + err = e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, podServer) framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.") // Before application of the policy, all communication should be successful. 
ginkgo.By("Creating client-a, in server's namespace, which should be able to contact the server.", func() { - testCanConnectProtocol(f, nsA, "client-a", service, 80, v1.ProtocolSCTP) + testCanConnectProtocol(ctx, f, nsA, "client-a", service, 80, v1.ProtocolSCTP) }) ginkgo.By("Creating client-b, in server's namespace, which should be able to contact the server.", func() { - testCanConnectProtocol(f, nsA, "client-b", service, 80, v1.ProtocolSCTP) + testCanConnectProtocol(ctx, f, nsA, "client-b", service, 80, v1.ProtocolSCTP) }) ginkgo.By("Creating client-a, not in server's namespace, which should be able to contact the server.", func() { - testCanConnectProtocol(f, nsB, "client-a", service, 80, v1.ProtocolSCTP) + testCanConnectProtocol(ctx, f, nsB, "client-a", service, 80, v1.ProtocolSCTP) }) ginkgo.By("Creating client-b, not in server's namespace, which should be able to contact the server.", func() { - testCanConnectProtocol(f, nsB, "client-b", service, 80, v1.ProtocolSCTP) + testCanConnectProtocol(ctx, f, nsB, "client-b", service, 80, v1.ProtocolSCTP) }) ginkgo.By("Creating a network policy for the server which allows traffic only from client-a in namespace-b.") @@ -1876,94 +1876,94 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly]" }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(ctx, policy, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error occurred while creating policy: policy.") ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy) ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() { - testCannotConnectProtocol(f, nsA, "client-a", service, 80, v1.ProtocolSCTP) + testCannotConnectProtocol(ctx, f, nsA, "client-a", service, 80, v1.ProtocolSCTP) }) ginkgo.By("Creating client-b, in server's namespace, which should not be able to contact the server.", func() { - testCannotConnectProtocol(f, nsA, "client-b", service, 80, v1.ProtocolSCTP) + testCannotConnectProtocol(ctx, f, nsA, "client-b", service, 80, v1.ProtocolSCTP) }) ginkgo.By("Creating client-a, not in server's namespace, which should be able to contact the server.", func() { - testCanConnectProtocol(f, nsB, "client-a", service, 80, v1.ProtocolSCTP) + testCanConnectProtocol(ctx, f, nsB, "client-a", service, 80, v1.ProtocolSCTP) }) ginkgo.By("Creating client-b, not in server's namespace, which should not be able to contact the server.", func() { - testCannotConnectProtocol(f, nsB, "client-b", service, 80, v1.ProtocolSCTP) + testCannotConnectProtocol(ctx, f, nsB, "client-b", service, 80, v1.ProtocolSCTP) }) }) }) }) -func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { - testCanConnectProtocol(f, ns, podName, service, targetPort, v1.ProtocolTCP) +func testCanConnect(ctx context.Context, f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { + testCanConnectProtocol(ctx, f, ns, podName, service, targetPort, v1.ProtocolTCP) } -func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { - testCannotConnectProtocol(f, ns, podName, service, targetPort, v1.ProtocolTCP) +func testCannotConnect(ctx context.Context, f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { + 
testCannotConnectProtocol(ctx, f, ns, podName, service, targetPort, v1.ProtocolTCP) } -func testCanConnectProtocol(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int, protocol v1.Protocol) { +func testCanConnectProtocol(ctx context.Context, f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int, protocol v1.Protocol) { ginkgo.By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name)) - podClient := createNetworkClientPod(f, ns, podName, service, targetPort, protocol) + podClient := createNetworkClientPod(ctx, f, ns, podName, service, targetPort, protocol) defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil { + if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(ctx, podClient.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() - checkConnectivity(f, ns, podClient, service) + checkConnectivity(ctx, f, ns, podClient, service) } -func testCannotConnectProtocol(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int, protocol v1.Protocol) { +func testCannotConnectProtocol(ctx context.Context, f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int, protocol v1.Protocol) { ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name)) - podClient := createNetworkClientPod(f, ns, podName, service, targetPort, protocol) + podClient := createNetworkClientPod(ctx, f, ns, podName, service, targetPort, protocol) ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil { + if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(ctx, podClient.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }) - checkNoConnectivity(f, ns, podClient, service) + checkNoConnectivity(ctx, f, ns, podClient, service) } -func checkConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) { +func checkConnectivity(ctx context.Context, f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) { framework.Logf("Waiting for %s to complete.", podClient.Name) - err := e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name) + err := e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, podClient.Name, ns.Name) framework.ExpectNoError(err, "Pod did not finish as expected.") framework.Logf("Waiting for %s to complete.", podClient.Name) - err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, podClient.Name, ns.Name) if err != nil { // Dump debug information for the test namespace. 
-		e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+		e2eoutput.DumpDebugInfo(ctx, f.ClientSet, f.Namespace.Name)
-		pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient)
+		pods, policies, logs := collectPodsAndNetworkPolicies(ctx, f, podClient)
		framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
	}
}
-func checkNoConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) {
+func checkNoConnectivity(ctx context.Context, f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) {
	framework.Logf("Waiting for %s to complete.", podClient.Name)
-	err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
+	err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, podClient.Name, ns.Name)
	// We expect an error here since it's a cannot connect test.
	// Dump debug information if the error was nil.
	if err == nil {
		// Dump debug information for the test namespace.
-		e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+		e2eoutput.DumpDebugInfo(ctx, f.ClientSet, f.Namespace.Name)
-		pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient)
+		pods, policies, logs := collectPodsAndNetworkPolicies(ctx, f, podClient)
		framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
	}
}
-func checkNoConnectivityByExitCode(f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) {
-	err := e2epod.WaitForPodCondition(f.ClientSet, ns.Name, podClient.Name, "terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
+func checkNoConnectivityByExitCode(ctx context.Context, f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) {
+	err := e2epod.WaitForPodCondition(ctx, f.ClientSet, ns.Name, podClient.Name, "terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
		statuses := pod.Status.ContainerStatuses
		if len(statuses) == 0 || statuses[0].State.Terminated == nil {
			return false, nil
@@ -1976,32 +1976,32 @@ func checkNoConnectivityByExitCode(f *framework.Framework, ns *v1.Namespace, pod
	// We expect an error here since it's a cannot connect test.
	// Dump debug information if the error was nil.
	if err == nil {
-		pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient)
+		pods, policies, logs := collectPodsAndNetworkPolicies(ctx, f, podClient)
		framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
		// Dump debug information for the test namespace.
-		e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+		e2eoutput.DumpDebugInfo(ctx, f.ClientSet, f.Namespace.Name)
	}
}
-func collectPodsAndNetworkPolicies(f *framework.Framework, podClient *v1.Pod) ([]string, *networkingv1.NetworkPolicyList, string) {
+func collectPodsAndNetworkPolicies(ctx context.Context, f *framework.Framework, podClient *v1.Pod) ([]string, *networkingv1.NetworkPolicyList, string) {
	// Collect pod logs when we see a failure.
-	logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, "client")
+	logs, logErr := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podClient.Name, "client")
	if logErr != nil && apierrors.IsNotFound(logErr) {
		// Pod may have already been removed; try to get previous pod logs
-		logs, logErr = e2epod.GetPreviousPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, fmt.Sprintf("%s-container", podClient.Name))
+		logs, logErr = e2epod.GetPreviousPodLogs(ctx, f.ClientSet, f.Namespace.Name, podClient.Name, fmt.Sprintf("%s-container", podClient.Name))
	}
	if logErr != nil {
		framework.Logf("Error getting container logs: %s", logErr)
	}
	// Collect current NetworkPolicies applied in the test namespace.
-	policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
+	policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(ctx, metav1.ListOptions{})
	if err != nil {
		framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
	}
	// Collect the list of pods running in the test namespace.
-	podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
+	podsInNS, err := e2epod.GetPodsInNamespace(ctx, f.ClientSet, f.Namespace.Name, map[string]string{})
	if err != nil {
		framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
	}
@@ -2015,7 +2015,7 @@ func collectPodsAndNetworkPolicies(f *framework.Framework, podClient *v1.Pod) ([
// Create a server pod with a listening container for each port in ports[].
// Will also assign a pod label with key: "pod-name" and label set to the given podName for later use by the network
// policy.
-func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, podName string, ports []protocolPort) (*v1.Pod, *v1.Service) {
+func createServerPodAndService(ctx context.Context, f *framework.Framework, namespace *v1.Namespace, podName string, ports []protocolPort) (*v1.Pod, *v1.Service) {
	// Because we have a variable amount of ports, we'll first loop through and generate our Containers for our pod,
	// and ServicePorts.for our Service.
	containers := []v1.Container{}
@@ -2070,7 +2070,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
	}
	ginkgo.By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name))
-	pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), &v1.Pod{
+	pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(ctx, &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName + "-",
			Labels: map[string]string{
@@ -2087,7 +2087,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
	svcName := fmt.Sprintf("svc-%s", podName)
	ginkgo.By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name))
-	svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(context.TODO(), &v1.Service{
+	svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(ctx, &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: svcName,
		},
@@ -2104,13 +2104,13 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
	return pod, svc
}
-func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) {
+func cleanupServerPodAndService(ctx context.Context, f *framework.Framework, pod *v1.Pod, service *v1.Service) {
	ginkgo.By("Cleaning up the server.")
-	if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil {
+	if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
		framework.Failf("unable to cleanup pod %v: %v", pod.Name, err)
	}
	ginkgo.By("Cleaning up the server's service.")
-	if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{}); err != nil {
+	if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(ctx, service.Name, metav1.DeleteOptions{}); err != nil {
		framework.Failf("unable to cleanup svc %v: %v", service.Name, err)
	}
}
@@ -2118,13 +2118,13 @@ func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1
// Create a client pod which will attempt a netcat to the provided service, on the specified port.
// This client will attempt a one-shot connection, then die, without restarting the pod.
// Test can then be asserted based on whether the pod quit with an error or not.
-func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int, protocol v1.Protocol) *v1.Pod {
-	return createNetworkClientPodWithRestartPolicy(f, namespace, podName, targetService, targetPort, protocol, v1.RestartPolicyNever)
+func createNetworkClientPod(ctx context.Context, f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int, protocol v1.Protocol) *v1.Pod {
+	return createNetworkClientPodWithRestartPolicy(ctx, f, namespace, podName, targetService, targetPort, protocol, v1.RestartPolicyNever)
}
// Create a client pod which will attempt a netcat to the provided service, on the specified port.
// It is similar to createNetworkClientPod but supports specifying RestartPolicy.
-func createNetworkClientPodWithRestartPolicy(f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int, protocol v1.Protocol, restartPolicy v1.RestartPolicy) *v1.Pod {
+func createNetworkClientPodWithRestartPolicy(ctx context.Context, f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int, protocol v1.Protocol, restartPolicy v1.RestartPolicy) *v1.Pod {
	var connectProtocol string
	switch protocol {
	case v1.ProtocolTCP:
@@ -2135,7 +2135,7 @@ func createNetworkClientPodWithRestartPolicy(f *framework.Framework, namespace *
		framework.Failf("createNetworkClientPodWithRestartPolicy, unexpected protocol %v", protocol)
	}
-	pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), &v1.Pod{
+	pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(ctx, &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName + "-",
			Labels: map[string]string{
@@ -2162,7 +2162,7 @@ func createNetworkClientPodWithRestartPolicy(f *framework.Framework, namespace *
}
// Patch pod with a map value
-func updatePodLabel(f *framework.Framework, namespace *v1.Namespace, podName string, patchOperation string, patchPath string, patchValue map[string]string) *v1.Pod {
+func updatePodLabel(ctx context.Context, f *framework.Framework, namespace *v1.Namespace, podName string, patchOperation string, patchPath string, patchValue map[string]string) *v1.Pod {
	type patchMapValue struct {
		Op   string            `json:"op"`
		Path string            `json:"path"`
@@ -2176,15 +2176,15 @@ func updatePodLabel(f *framework.Framework, namespace *v1.Namespace, podName str
	payloadBytes, err := json.Marshal(payload)
	framework.ExpectNoError(err)
-	pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Patch(context.TODO(), podName, types.JSONPatchType, payloadBytes, metav1.PatchOptions{})
+	pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Patch(ctx, podName, types.JSONPatchType, payloadBytes, metav1.PatchOptions{})
	framework.ExpectNoError(err)
	return pod
}
-func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) {
+func cleanupNetworkPolicy(ctx context.Context, f *framework.Framework, policy *networkingv1.NetworkPolicy) {
	ginkgo.By("Cleaning up the policy.")
-	if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(context.TODO(), policy.Name, metav1.DeleteOptions{}); err != nil {
+	if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(ctx, policy.Name, metav1.DeleteOptions{}); err != nil {
		framework.Failf("unable to cleanup policy %v: %v", policy.Name, err)
	}
}
@@ -2261,7 +2261,7 @@ var _ = common.SIGDescribe("NetworkPolicy API", func() {
		ginkgo.By("getting /apis/networking.k8s.io")
		{
			group := &metav1.APIGroup{}
-			err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(context.TODO()).Into(group)
+			err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(ctx).Into(group)
			framework.ExpectNoError(err)
			found := false
			for _, version := range group.Versions {
@@ -2291,48 +2291,48 @@ var _ = common.SIGDescribe("NetworkPolicy API", func() {
		}
		// NetPol resource create/read/update/watch verbs
		ginkgo.By("creating")
-		_, err := npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{})
+		_, err := npClient.Create(ctx, npTemplate, metav1.CreateOptions{})
		framework.ExpectNoError(err)
-		_, err = npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{})
+		_, err = npClient.Create(ctx, npTemplate, metav1.CreateOptions{})
		framework.ExpectNoError(err)
-		createdNetPol, err := npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{})
+		createdNetPol, err := npClient.Create(ctx, npTemplate, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		ginkgo.By("getting")
-		gottenNetPol, err := npClient.Get(context.TODO(), createdNetPol.Name, metav1.GetOptions{})
+		gottenNetPol, err := npClient.Get(ctx, createdNetPol.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(gottenNetPol.UID, createdNetPol.UID)
		ginkgo.By("listing")
-		nps, err := npClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		nps, err := npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		framework.ExpectEqual(len(nps.Items), 3, "filtered list should have 3 items")
		ginkgo.By("watching")
		framework.Logf("starting watch")
-		npWatch, err := npClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: nps.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
+		npWatch, err := npClient.Watch(ctx, metav1.ListOptions{ResourceVersion: nps.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		// Test cluster-wide list and watch
		clusterNPClient := f.ClientSet.NetworkingV1().NetworkPolicies("")
		ginkgo.By("cluster-wide listing")
-		clusterNPs, err := clusterNPClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		clusterNPs, err := clusterNPClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		framework.ExpectEqual(len(clusterNPs.Items), 3, "filtered list should have 3 items")
		ginkgo.By("cluster-wide watching")
		framework.Logf("starting watch")
-		_, err = clusterNPClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: nps.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
+		_, err = clusterNPClient.Watch(ctx, metav1.ListOptions{ResourceVersion: nps.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		ginkgo.By("patching")
-		patchedNetPols, err := npClient.Patch(context.TODO(), createdNetPol.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
+		patchedNetPols, err := npClient.Patch(ctx, createdNetPol.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(patchedNetPols.Annotations["patched"], "true", "patched object should have the applied annotation")
		ginkgo.By("updating")
		npToUpdate := patchedNetPols.DeepCopy()
		npToUpdate.Annotations["updated"] = "true"
-		updatedNetPols, err := npClient.Update(context.TODO(), npToUpdate, metav1.UpdateOptions{})
+		updatedNetPols, err := npClient.Update(ctx, npToUpdate, metav1.UpdateOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(updatedNetPols.Annotations["updated"], "true", "updated object should have the applied annotation")
@@ -2361,20 +2361,20 @@ var _ = common.SIGDescribe("NetworkPolicy API", func() {
		}
		// NetPol resource delete operations
		ginkgo.By("deleting")
-		err = npClient.Delete(context.TODO(), createdNetPol.Name, metav1.DeleteOptions{})
+		err = npClient.Delete(ctx, createdNetPol.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)
-		_, err = npClient.Get(context.TODO(), createdNetPol.Name, metav1.GetOptions{})
+		_, err = npClient.Get(ctx, createdNetPol.Name, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			framework.Failf("expected 404, got %#v", err)
		}
-		nps, err = npClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		nps, err = npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		framework.ExpectEqual(len(nps.Items), 2, "filtered list should have 2 items")
		ginkgo.By("deleting a collection")
-		err = npClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		err = npClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
-		nps, err = npClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		nps, err = npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		framework.ExpectEqual(len(nps.Items), 0, "filtered list should have 0 items")
	})
@@ -2385,14 +2385,14 @@ var _ = common.SIGDescribe("NetworkPolicy API", func() {
// For security reasons, and also to allow clusters to use userspace SCTP implementations,
// we require that just creating an SCTP Pod/Service/NetworkPolicy must not do anything
// that would cause the sctp kernel module to be loaded.
-func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool {
+func CheckSCTPModuleLoadedOnNodes(ctx context.Context, f *framework.Framework, nodes *v1.NodeList) bool {
	hostExec := utils.NewHostExec(f)
	ginkgo.DeferCleanup(hostExec.Cleanup)
	re := regexp.MustCompile(`^\s*sctp\s+`)
	cmd := "lsmod | grep sctp"
	for _, node := range nodes.Items {
		framework.Logf("Executing cmd %q on node %v", cmd, node.Name)
-		result, err := hostExec.IssueCommandWithResult(cmd, &node)
+		result, err := hostExec.IssueCommandWithResult(ctx, cmd, &node)
		if err != nil {
			framework.Logf("sctp module is not loaded or error occurred while executing command %s on node: %v", cmd, err)
		}
diff --git a/test/e2e/network/netpol/network_policy.go b/test/e2e/network/netpol/network_policy.go
index 4cb45cf02bf..67966086e1e 100644
--- a/test/e2e/network/netpol/network_policy.go
+++ b/test/e2e/network/netpol/network_policy.go
@@ -120,10 +120,10 @@ var _ = common.SIGDescribe("Netpol", func() {
		ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) {
			protocols := []v1.Protocol{protocolTCP}
			ports := []int32{80}
-			k8s = initializeResources(f, protocols, ports)
+			k8s = initializeResources(ctx, f, protocols, ports)
			nsX, _, _ := getK8sNamespaces(k8s)
			policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress", metav1.LabelSelector{}, SetSpecIngressRules())
-			CreatePolicy(k8s, policy, nsX)
+			CreatePolicy(ctx, k8s, policy, nsX)
			reachability := NewReachability(k8s.AllPodStrings(), true)
			reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)
@@ -135,9 +135,9 @@ var _ = common.SIGDescribe("Netpol", func() {
			policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules(), SetSpecEgressRules())
			protocols := []v1.Protocol{protocolTCP}
			ports := []int32{80}
-			k8s = initializeResources(f, protocols, ports)
+			k8s = initializeResources(ctx, f, protocols, ports)
			nsX, _, _ := getK8sNamespaces(k8s)
-			CreatePolicy(k8s, policy, nsX)
+			CreatePolicy(ctx, k8s, policy, nsX)
			reachability := NewReachability(k8s.AllPodStrings(), true)
reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false) @@ -158,9 +158,9 @@ var _ = common.SIGDescribe("Netpol", func() { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -172,19 +172,19 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy] ", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) ginkgo.By("having a deny all ingress policy", func() { // Deny all Ingress traffic policy to pods on namespace nsX policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules()) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) }) // Allow Ingress traffic only to pod x/a from any pod ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{PodSelector: &metav1.LabelSelector{}, NamespaceSelector: &metav1.LabelSelector{}}) allowPolicy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all-to-a", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, allowPolicy, nsX) + CreatePolicy(ctx, k8s, allowPolicy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), true) @@ -197,12 +197,12 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{}}}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-from-another-ns", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) @@ -211,12 +211,12 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{namespaceLabelKey: nsY}}}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-ns-selector", 
map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) // disallow all traffic from the x or z namespaces @@ -240,9 +240,9 @@ var _ = common.SIGDescribe("Netpol", func() { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -254,7 +254,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ @@ -266,7 +266,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-y-match-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) // disallow all traffic from the x or z namespaces @@ -279,7 +279,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ @@ -296,7 +296,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces}, networkingv1.NetworkPolicyPeer{PodSelector: podBAllowlisting}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-y-match-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.Expect(NewPodString(nsX, "a"), NewPodString(nsX, "a"), false) @@ -308,7 +308,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ @@ -325,7 +325,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = 
append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces, PodSelector: allowedPod}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-y-podselector-and-nsselector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -338,7 +338,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ @@ -358,7 +358,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces, PodSelector: allowedPod}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-y-z-pod-b-c", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false) @@ -371,14 +371,14 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy based on any PodSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) ingressRule := networkingv1.NetworkPolicyIngressRule{} for _, label := range []map[string]string{{"pod": "b"}, {"pod": "c"}} { ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{PodSelector: &metav1.LabelSelector{MatchLabels: label}}) } policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-x-pod-b-c", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -393,7 +393,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, _ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -408,7 +408,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces, PodSelector: allowedPods}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-y-pod-a-via-namespace-pod-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + 
CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -421,7 +421,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolTCP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedLabels := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -432,7 +432,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedLabels}) ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 81}, Protocol: &protocolTCP}) allowPort81Policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, allowPort81Policy, nsX) + CreatePolicy(ctx, k8s, allowPort81Policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false) @@ -446,7 +446,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedLabels := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -457,7 +457,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedLabels}) ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 81}, Protocol: &protocolTCP}) allowPort81Policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, allowPort81Policy, nsX) + CreatePolicy(ctx, k8s, allowPort81Policy, nsX) reachabilityALLOW := NewReachability(k8s.AllPodStrings(), true) reachabilityALLOW.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false) @@ -478,7 +478,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 80}, Protocol: &protocolTCP}) allowPort80Policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-ns-selector-80", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, allowPort80Policy, nsX) + CreatePolicy(ctx, k8s, allowPort80Policy, nsX) ginkgo.By("Verifying that we can add a policy to unblock port 80") ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityALLOW}) @@ -489,9 +489,9 @@ var _ = common.SIGDescribe("Netpol", func() { policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{})) protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, 
protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.") reachability := NewReachability(k8s.AllPodStrings(), true) @@ -505,9 +505,9 @@ var _ = common.SIGDescribe("Netpol", func() { policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all", map[string]string{}, SetSpecIngressRules(IngressRules)) protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) ginkgo.By("Blocking all ports other then 81 in the entire namespace") @@ -523,7 +523,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedLabels := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -534,7 +534,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedLabels}) ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80-tcp"}, Protocol: &protocolTCP}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-ns-selector-80", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) // disallow all traffic from the x or z namespaces @@ -558,9 +558,9 @@ var _ = common.SIGDescribe("Netpol", func() { protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachabilityPort80 := NewReachability(k8s.AllPodStrings(), true) ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort80}) @@ -577,16 +577,16 @@ var _ = common.SIGDescribe("Netpol", func() { policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all-mutate-to-deny-all", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{})) protocols := []v1.Protocol{protocolTCP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability}) // part 2) update the policy to deny all policy.Spec.Ingress = []networkingv1.NetworkPolicyIngressRule{} - UpdatePolicy(k8s, policy, nsX) + UpdatePolicy(ctx, k8s, policy, nsX) reachabilityDeny := NewReachability(k8s.AllPodStrings(), true) reachabilityDeny.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false) @@ -596,7 +596,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should allow ingress access from updated 
namespace [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, _ := getK8sNamespaces(k8s) ginkgo.DeferCleanup(DeleteNamespaceLabel, k8s, nsY, "ns2") @@ -608,14 +608,14 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedLabels}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) // add a new label, we'll remove it after this test is completed - AddNamespaceLabel(k8s, nsY, "ns2", "updated") + AddNamespaceLabel(ctx, k8s, nsY, "ns2", "updated") // anything from namespace 'y' should be able to get to x/a reachabilityWithLabel := NewReachability(k8s.AllPodStrings(), true) @@ -627,7 +627,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) ginkgo.DeferCleanup(ResetPodLabels, k8s, nsX, "b") @@ -637,13 +637,13 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{PodSelector: allowedLabels}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-pod-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) - AddPodLabels(k8s, nsX, "b", matchLabels) + AddPodLabels(ctx, k8s, nsX, "b", matchLabels) ginkgo.By("x/b is able to reach x/a when label is updated") @@ -656,12 +656,12 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should deny ingress from pods on other namespaces [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) IngressRules := networkingv1.NetworkPolicyIngressRule{} IngressRules.From = append(IngressRules.From, networkingv1.NetworkPolicyPeer{PodSelector: &metav1.LabelSelector{MatchLabels: map[string]string{}}}) policy := GenNetworkPolicyWithNameAndPodSelector("deny-empty-policy", metav1.LabelSelector{}, SetSpecIngressRules(IngressRules)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{Namespace: nsY}, &Peer{Namespace: nsX}, false) @@ -673,19 +673,19 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should deny 
ingress access to updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) ginkgo.DeferCleanup(ResetPodLabels, k8s, nsX, "a") policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress-via-label-selector", metav1.LabelSelector{MatchLabels: map[string]string{"target": "isolated"}}, SetSpecIngressRules()) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) ginkgo.By("Verify that everything can reach x/a") reachability := NewReachability(k8s.AllPodStrings(), true) ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) - AddPodLabels(k8s, nsX, "a", map[string]string{"target": "isolated"}) + AddPodLabels(ctx, k8s, nsX, "a", map[string]string{"target": "isolated"}) reachabilityIsolated := NewReachability(k8s.AllPodStrings(), true) reachabilityIsolated.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -695,10 +695,10 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should deny egress from pods based on PodSelector [Feature:NetworkPolicy] ", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) policy := GenNetworkPolicyWithNameAndPodSelector("deny-egress-pod-a", metav1.LabelSelector{MatchLabels: map[string]string{"pod": "a"}}, SetSpecEgressRules()) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllEgress(NewPodString(nsX, "a"), false) @@ -709,10 +709,10 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should deny egress from all pods in a namespace [Feature:NetworkPolicy] ", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) policy := GenNetworkPolicyWithNameAndPodSelector("deny-egress-ns-x", metav1.LabelSelector{}, SetSpecEgressRules()) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{}, false) @@ -740,9 +740,9 @@ var _ = common.SIGDescribe("Netpol", func() { policy.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeEgress, networkingv1.PolicyTypeIngress} protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachabilityPort80 := NewReachability(k8s.AllPodStrings(), true) reachabilityPort80.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -766,7 +766,7 @@ var _ = common.SIGDescribe("Netpol", func() { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, _ := getK8sNamespaces(k8s) // Building egress policy for x/a to y/a only @@ -783,7 +783,7 @@ var _ = common.SIGDescribe("Netpol", func() { egressRule1 := networkingv1.NetworkPolicyEgressRule{} egressRule1.To = 
append(egressRule1.To, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedEgressNamespaces, PodSelector: allowedEgressPods}) egressPolicy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-to-ns-y-pod-a", map[string]string{"pod": "a"}, SetSpecEgressRules(egressRule1)) - CreatePolicy(k8s, egressPolicy, nsX) + CreatePolicy(ctx, k8s, egressPolicy, nsX) // Creating ingress policy to allow from x/a to y/a and y/b allowedIngressNamespaces := &metav1.LabelSelector{ @@ -801,8 +801,8 @@ var _ = common.SIGDescribe("Netpol", func() { allowIngressPolicyPodA := GenNetworkPolicyWithNameAndPodMatchLabel("allow-from-xa-on-ya-match-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) allowIngressPolicyPodB := GenNetworkPolicyWithNameAndPodMatchLabel("allow-from-xa-on-yb-match-selector", map[string]string{"pod": "b"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, allowIngressPolicyPodA, nsY) - CreatePolicy(k8s, allowIngressPolicyPodB, nsY) + CreatePolicy(ctx, k8s, allowIngressPolicyPodA, nsY) + CreatePolicy(ctx, k8s, allowIngressPolicyPodB, nsY) // While applying the policies, traffic needs to be allowed by both egress and ingress rules. // Egress rules only @@ -857,7 +857,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, _ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -872,7 +872,7 @@ var _ = common.SIGDescribe("Netpol", func() { egressRule1 := networkingv1.NetworkPolicyEgressRule{} egressRule1.To = append(egressRule1.To, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces, PodSelector: allowedPods}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-to-ns-y-pod-a", map[string]string{"pod": "a"}, SetSpecEgressRules(egressRule1)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllEgress(NewPodString(nsX, "a"), false) @@ -883,12 +883,12 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Feature:UDP]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP, protocolUDP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Protocol: &protocolTCP}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ingress-by-proto", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachabilityTCP := NewReachability(k8s.AllPodStrings(), true) ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityTCP}) @@ -901,12 +901,12 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := 
[]v1.Protocol{protocolTCP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) IngressRules := networkingv1.NetworkPolicyIngressRule{} IngressRules.Ports = append(IngressRules.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80}}) policyAllowOnlyPort80 := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ingress-port-80", map[string]string{}, SetSpecIngressRules(IngressRules)) - CreatePolicy(k8s, policyAllowOnlyPort80, nsX) + CreatePolicy(ctx, k8s, policyAllowOnlyPort80, nsX) ginkgo.By("The policy targets port 80 -- so let's make sure traffic on port 81 is blocked") @@ -917,7 +917,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.By("Allowing all ports") policyAllowAll := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ingress", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{})) - CreatePolicy(k8s, policyAllowAll, nsX) + CreatePolicy(ctx, k8s, policyAllowAll, nsX) reachabilityAll := NewReachability(k8s.AllPodStrings(), true) ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll}) @@ -929,9 +929,9 @@ var _ = common.SIGDescribe("Netpol", func() { policyAllowPort80 := GenNetworkPolicyWithNameAndPodMatchLabel("allow-egress-port-80", map[string]string{}, SetSpecEgressRules(egressRule)) protocols := []v1.Protocol{protocolTCP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policyAllowPort80, nsX) + CreatePolicy(ctx, k8s, policyAllowPort80, nsX) ginkgo.By("Making sure ingress doesn't work other than port 80") @@ -942,7 +942,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.By("Allowing all ports") policyAllowAll := GenNetworkPolicyWithNameAndPodMatchLabel("allow-egress", map[string]string{}, SetSpecEgressRules(networkingv1.NetworkPolicyEgressRule{})) - CreatePolicy(k8s, policyAllowAll, nsX) + CreatePolicy(ctx, k8s, policyAllowAll, nsX) reachabilityAll := NewReachability(k8s.AllPodStrings(), true) ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll}) @@ -955,9 +955,9 @@ var _ = common.SIGDescribe("Netpol", func() { policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules(), SetSpecEgressRules()) protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) // Expect all traffic into, and out of "x" to be False. @@ -965,7 +965,7 @@ var _ = common.SIGDescribe("Netpol", func() { reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false) ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) - err := k8s.cleanNetworkPolicies() + err := k8s.cleanNetworkPolicies(ctx) time.Sleep(3 * time.Second) // TODO we can remove this eventually, its just a hack to keep CI stable. 
framework.ExpectNoError(err, "unable to clean network policies") @@ -980,9 +980,9 @@ var _ = common.SIGDescribe("Netpol", func() { // Getting podServer's status to get podServer's IP, to create the CIDR protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, _ := getK8sNamespaces(k8s) - podList, err := f.ClientSet.CoreV1().Pods(nsY).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=b"}) + podList, err := f.ClientSet.CoreV1().Pods(nsY).List(ctx, metav1.ListOptions{LabelSelector: "pod=b"}) framework.ExpectNoError(err, "Failing to list pods in namespace y") pod := podList.Items[0] @@ -995,7 +995,7 @@ var _ = common.SIGDescribe("Netpol", func() { egressRule1.To = append(egressRule1.To, networkingv1.NetworkPolicyPeer{IPBlock: &networkingv1.IPBlock{CIDR: podServerCIDR}}) policyAllowCIDR := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-cidr-egress-rule", map[string]string{"pod": "a"}, SetSpecEgressRules(egressRule1)) - CreatePolicy(k8s, policyAllowCIDR, nsX) + CreatePolicy(ctx, k8s, policyAllowCIDR, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllEgress(NewPodString(nsX, "a"), false) @@ -1007,15 +1007,15 @@ var _ = common.SIGDescribe("Netpol", func() { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - podList, err := f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=a"}) + podList, err := f.ClientSet.CoreV1().Pods(nsX).List(ctx, metav1.ListOptions{LabelSelector: "pod=a"}) framework.ExpectNoError(err, "Failing to find pod x/a") podA := podList.Items[0] podServerAllowCIDR := fmt.Sprintf("%s/4", podA.Status.PodIP) - podList, err = f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=b"}) + podList, err = f.ClientSet.CoreV1().Pods(nsX).List(ctx, metav1.ListOptions{LabelSelector: "pod=b"}) framework.ExpectNoError(err, "Failing to find pod x/b") podB := podList.Items[0] @@ -1029,7 +1029,7 @@ var _ = common.SIGDescribe("Netpol", func() { egressRule1.To = append(egressRule1.To, networkingv1.NetworkPolicyPeer{IPBlock: &networkingv1.IPBlock{CIDR: podServerAllowCIDR, Except: podServerExceptList}}) policyAllowCIDR := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-cidr-egress-rule", map[string]string{"pod": "a"}, SetSpecEgressRules(egressRule1)) - CreatePolicy(k8s, policyAllowCIDR, nsX) + CreatePolicy(ctx, k8s, policyAllowCIDR, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.Expect(NewPodString(nsX, "a"), NewPodString(nsX, "b"), false) @@ -1041,13 +1041,13 @@ var _ = common.SIGDescribe("Netpol", func() { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - podList, err := f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=a"}) + podList, err := f.ClientSet.CoreV1().Pods(nsX).List(ctx, metav1.ListOptions{LabelSelector: "pod=a"}) framework.ExpectNoError(err, "Failing to find pod x/a") podA := 
podList.Items[0] - podList, err = f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=b"}) + podList, err = f.ClientSet.CoreV1().Pods(nsX).List(ctx, metav1.ListOptions{LabelSelector: "pod=b"}) framework.ExpectNoError(err, "Failing to find pod x/b") podB := podList.Items[0] @@ -1063,7 +1063,7 @@ var _ = common.SIGDescribe("Netpol", func() { egressRule1.To = append(egressRule1.To, networkingv1.NetworkPolicyPeer{IPBlock: &networkingv1.IPBlock{CIDR: podServerAllowCIDR, Except: podServerExceptList}}) policyAllowCIDR := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-cidr-egress-rule", map[string]string{"pod": "a"}, SetSpecEgressRules(egressRule1)) - CreatePolicy(k8s, policyAllowCIDR, nsX) + CreatePolicy(ctx, k8s, policyAllowCIDR, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.Expect(NewPodString(nsX, "a"), NewPodString(nsX, "b"), false) @@ -1077,7 +1077,7 @@ var _ = common.SIGDescribe("Netpol", func() { allowPolicy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-cidr-egress-rule", map[string]string{"pod": "a"}, SetSpecEgressRules(egressRule3)) // SHOULD THIS BE UPDATE OR CREATE JAY TESTING 10/31 - UpdatePolicy(k8s, allowPolicy, nsX) + UpdatePolicy(ctx, k8s, allowPolicy, nsX) reachabilityAllow := NewReachability(k8s.AllPodStrings(), true) reachabilityAllow.ExpectAllEgress(NewPodString(nsX, "a"), false) @@ -1103,9 +1103,9 @@ var _ = common.SIGDescribe("Netpol", func() { metav1.LabelSelector{MatchLabels: targetLabels}, SetSpecEgressRules(networkingv1.NetworkPolicyEgressRule{})) protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, allowEgressPolicy, nsX) + CreatePolicy(ctx, k8s, allowEgressPolicy, nsX) allowEgressReachability := NewReachability(k8s.AllPodStrings(), true) ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: allowEgressReachability}) @@ -1113,7 +1113,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.By("Creating a network policy for pod-a that denies traffic from pod-b.") denyAllIngressPolicy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress-via-label-selector", metav1.LabelSelector{MatchLabels: targetLabels}, SetSpecIngressRules()) - CreatePolicy(k8s, denyAllIngressPolicy, nsX) + CreatePolicy(ctx, k8s, denyAllIngressPolicy, nsX) denyIngressToXReachability := NewReachability(k8s.AllPodStrings(), true) denyIngressToXReachability.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -1126,19 +1126,19 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) ginkgo.By("Creating a default-deny ingress policy.") // Empty podSelector blocks the entire namespace policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress", metav1.LabelSelector{}, SetSpecIngressRules()) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) ginkgo.By("Creating a network policy for the server which allows traffic only via SCTP on port 81.") ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.Ports = append(ingressRule.Ports, 
networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 81}, Protocol: &protocolSCTP}) policy = GenNetworkPolicyWithNameAndPodMatchLabel("allow-only-sctp-ingress-on-port-81", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) ginkgo.By("Trying to connect to TCP port 81, which should be blocked by the deny-ingress policy.") reachability := NewReachability(k8s.AllPodStrings(), true) @@ -1156,9 +1156,9 @@ var _ = common.SIGDescribe("Netpol", func() { policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-only-sctp-ingress-on-port-80", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) protocols := []v1.Protocol{protocolTCP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) ginkgo.By("Trying to connect to TCP port 81, which should be blocked by implicit isolation.") reachability := NewReachability(k8s.AllPodStrings(), true) @@ -1172,9 +1172,9 @@ var _ = common.SIGDescribe("Netpol", func() { policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-only-udp-ingress-on-port-81", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) protocols := []v1.Protocol{protocolTCP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) ginkgo.By("Creating a network policy for the server which allows traffic only via UDP on port 81.") @@ -1188,7 +1188,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedLabels := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1198,7 +1198,7 @@ var _ = common.SIGDescribe("Netpol", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedLabels}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-ns-selector-for-immutable-ns-label", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false) @@ -1211,7 +1211,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, _ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ @@ -1223,7 +1223,7 @@ var _ = common.SIGDescribe("Netpol", func() { egressRule := networkingv1.NetworkPolicyEgressRule{} egressRule.To = append(egressRule.To, 
networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-y-match-selector-for-immutable-ns-label", map[string]string{"pod": "a"}, SetSpecEgressRules(egressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{Namespace: nsX, Pod: "a"}, &Peer{Namespace: nsY}, false) @@ -1247,10 +1247,10 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolUDP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules()) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false) @@ -1262,7 +1262,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ginkgo.By("Creating a network policy allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolUDP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedLabels := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1274,7 +1274,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedLabels}) ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 81}, Protocol: &protocolUDP}) allowPort81Policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ingress-on-port-81-ns-x", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, allowPort81Policy, nsX) + CreatePolicy(ctx, k8s, allowPort81Policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false) @@ -1286,7 +1286,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolUDP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, _ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1301,7 +1301,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces, PodSelector: allowedPods}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-y-pod-a-via-namespace-pod-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ 
-1327,10 +1327,10 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func( ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolSCTP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, _, _ := getK8sNamespaces(k8s) policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules()) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false) @@ -1342,7 +1342,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func( ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolSCTP} ports := []int32{81} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, nsZ := getK8sNamespaces(k8s) allowedLabels := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1353,7 +1353,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func( ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedLabels}) ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 81}, Protocol: &protocolSCTP}) allowPort81Policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ingress-on-port-81-ns-x", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, allowPort81Policy, nsX) + CreatePolicy(ctx, k8s, allowPort81Policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false) @@ -1365,7 +1365,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func( ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolSCTP} ports := []int32{80} - k8s = initializeResources(f, protocols, ports) + k8s = initializeResources(ctx, f, protocols, ports) nsX, nsY, _ := getK8sNamespaces(k8s) allowedNamespaces := &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1380,7 +1380,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func( ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: allowedNamespaces, PodSelector: allowedPods}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ns-y-pod-a-via-namespace-pod-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) - CreatePolicy(k8s, policy, nsX) + CreatePolicy(ctx, k8s, policy, nsX) reachability := NewReachability(k8s.AllPodStrings(), true) reachability.ExpectAllIngress(NewPodString(nsX, "a"), false) @@ -1419,7 +1419,7 @@ func getK8sNamespaces(k8s *kubeManager) (string, string, string) { return ns[0], ns[1], ns[2] } -func initializeCluster(f *framework.Framework, protocols []v1.Protocol, ports []int32) (*kubeManager, error) { +func initializeCluster(ctx context.Context, f *framework.Framework, protocols []v1.Protocol, ports []int32) (*kubeManager, 
error) { dnsDomain := framework.TestContext.ClusterDNSDomain framework.Logf("dns domain: %s", dnsDomain) @@ -1431,7 +1431,7 @@ func initializeCluster(f *framework.Framework, protocols []v1.Protocol, ports [] framework.Logf("initializing cluster: ensuring namespaces, pods and services exist and are ready") - if err := k8s.initializeClusterFromModel(model); err != nil { + if err := k8s.initializeClusterFromModel(ctx, model); err != nil { return nil, err } @@ -1447,8 +1447,8 @@ func initializeCluster(f *framework.Framework, protocols []v1.Protocol, ports [] // initializeResources uses the e2e framework to create all necessary namespace resources, based on the network policy // model derived from the framework. It then waits for the resources described by the model to be up and running // (i.e. all pods are ready and running in their namespaces). -func initializeResources(f *framework.Framework, protocols []v1.Protocol, ports []int32) *kubeManager { - k8s, err := initializeCluster(f, protocols, ports) +func initializeResources(ctx context.Context, f *framework.Framework, protocols []v1.Protocol, ports []int32) *kubeManager { + k8s, err := initializeCluster(ctx, f, protocols, ports) framework.ExpectNoError(err, "unable to initialize resources") return k8s } diff --git a/test/e2e/network/netpol/network_policy_api.go b/test/e2e/network/netpol/network_policy_api.go index ecc0f1317a4..18fe55c7475 100644 --- a/test/e2e/network/netpol/network_policy_api.go +++ b/test/e2e/network/netpol/network_policy_api.go @@ -93,7 +93,7 @@ var _ = common.SIGDescribe("Netpol API", func() { ginkgo.By("getting /apis/networking.k8s.io") { group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err) found := false for _, version := range group.Versions { @@ -123,48 +123,48 @@ var _ = common.SIGDescribe("Netpol API", func() { } // NetPol resource create/read/update/watch verbs ginkgo.By("creating") - _, err := npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + _, err := npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + _, err = npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) - createdNetPol, err := npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + createdNetPol, err := npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("getting") - gottenNetPol, err := npClient.Get(context.TODO(), createdNetPol.Name, metav1.GetOptions{}) + gottenNetPol, err := npClient.Get(ctx, createdNetPol.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(gottenNetPol.UID, createdNetPol.UID) ginkgo.By("listing") - nps, err := npClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + nps, err := npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(nps.Items), 3, "filtered list should have 3 items") ginkgo.By("watching") framework.Logf("starting watch") - npWatch, err := npClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: nps.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) + npWatch, err := npClient.Watch(ctx, 
metav1.ListOptions{ResourceVersion: nps.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) // Test cluster-wide list and watch clusterNPClient := f.ClientSet.NetworkingV1().NetworkPolicies("") ginkgo.By("cluster-wide listing") - clusterNPs, err := clusterNPClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + clusterNPs, err := clusterNPClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(clusterNPs.Items), 3, "filtered list should have 3 items") ginkgo.By("cluster-wide watching") framework.Logf("starting watch") - _, err = clusterNPClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: nps.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) + _, err = clusterNPClient.Watch(ctx, metav1.ListOptions{ResourceVersion: nps.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) ginkgo.By("patching") - patchedNetPols, err := npClient.Patch(context.TODO(), createdNetPol.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) + patchedNetPols, err := npClient.Patch(ctx, createdNetPol.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedNetPols.Annotations["patched"], "true", "patched object should have the applied annotation") ginkgo.By("updating") npToUpdate := patchedNetPols.DeepCopy() npToUpdate.Annotations["updated"] = "true" - updatedNetPols, err := npClient.Update(context.TODO(), npToUpdate, metav1.UpdateOptions{}) + updatedNetPols, err := npClient.Update(ctx, npToUpdate, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(updatedNetPols.Annotations["updated"], "true", "updated object should have the applied annotation") @@ -193,20 +193,20 @@ var _ = common.SIGDescribe("Netpol API", func() { } // NetPol resource delete operations ginkgo.By("deleting") - err = npClient.Delete(context.TODO(), createdNetPol.Name, metav1.DeleteOptions{}) + err = npClient.Delete(ctx, createdNetPol.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - _, err = npClient.Get(context.TODO(), createdNetPol.Name, metav1.GetOptions{}) + _, err = npClient.Get(ctx, createdNetPol.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.Failf("expected 404, got %#v", err) } - nps, err = npClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + nps, err = npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(nps.Items), 2, "filtered list should have 2 items") ginkgo.By("deleting a collection") - err = npClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + err = npClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) - nps, err = npClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + nps, err = npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(nps.Items), 0, "filtered list should have 0 items") }) @@ -231,14 +231,14 @@ var _ = 
common.SIGDescribe("Netpol API", func() { SetObjectMetaLabel(map[string]string{"special-label": f.UniqueName}), SetSpecPodSelectorMatchLabels(map[string]string{"pod-name": "test-pod"}), SetSpecEgressRules(egressRule)) - _, err := npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + _, err := npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectError(err, "request template:%v", npTemplate) ginkgo.By("EndPort field cannot be defined if the Port field is defined as a named (string) port.") egressRule = networkingv1.NetworkPolicyEgressRule{} egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80"}, EndPort: &endport}) npTemplate.Spec.Egress = []networkingv1.NetworkPolicyEgressRule{egressRule} - _, err = npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + _, err = npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectError(err, "request template:%v", npTemplate) ginkgo.By("EndPort field must be equal or greater than port.") @@ -246,26 +246,26 @@ var _ = common.SIGDescribe("Netpol API", func() { egressRule = networkingv1.NetworkPolicyEgressRule{} egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 30000}, EndPort: &endport}) npTemplate.Spec.Egress = []networkingv1.NetworkPolicyEgressRule{egressRule} - _, err = npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + _, err = npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectError(err, "request template:%v", npTemplate) ginkgo.By("EndPort field is equal with port.") egressRule.Ports[0].Port = &intstr.IntOrString{Type: intstr.Int, IntVal: 20000} npTemplate.Spec.Egress = []networkingv1.NetworkPolicyEgressRule{egressRule} - _, err = npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + _, err = npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err, "request template:%v", npTemplate) ginkgo.By("EndPort field is greater than port.") egressRule = networkingv1.NetworkPolicyEgressRule{} egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 10000}, EndPort: &endport}) npTemplate.Spec.Egress = []networkingv1.NetworkPolicyEgressRule{egressRule} - _, err = npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + _, err = npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err, "request template:%v", npTemplate) ginkgo.By("deleting all test collection") - err = npClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + err = npClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) - nps, err := npClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + nps, err := npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(nps.Items), 0, "filtered list should be 0 items") }) @@ -301,7 +301,7 @@ var _ = common.SIGDescribe("Netpol API", func() { SetObjectMetaLabel(map[string]string{"special-label": f.UniqueName}), SetSpecPodSelectorMatchLabels(map[string]string{"pod-name": "test-pod"}), SetSpecIngressRules(ingressRule)) - newNetPol, err := 
npClient.Create(context.TODO(), npTemplate, metav1.CreateOptions{}) + newNetPol, err := npClient.Create(ctx, npTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err, "request template:%v", npTemplate) @@ -323,24 +323,24 @@ var _ = common.SIGDescribe("Netpol API", func() { ginkgo.By("NetworkPolicy should support valid status condition") newNetPol.Status = status - _, err = npClient.UpdateStatus(context.TODO(), newNetPol, metav1.UpdateOptions{}) + _, err = npClient.UpdateStatus(ctx, newNetPol, metav1.UpdateOptions{}) framework.ExpectNoError(err, "request template:%v", newNetPol) ginkgo.By("NetworkPolicy should not support status condition without reason field") newNetPol.Status.Conditions[0].Reason = "" - _, err = npClient.UpdateStatus(context.TODO(), newNetPol, metav1.UpdateOptions{}) + _, err = npClient.UpdateStatus(ctx, newNetPol, metav1.UpdateOptions{}) framework.ExpectError(err, "request template:%v", newNetPol) ginkgo.By("NetworkPolicy should not support status condition with duplicated types") newNetPol.Status.Conditions = []metav1.Condition{condition, condition} newNetPol.Status.Conditions[1].Status = metav1.ConditionFalse - _, err = npClient.UpdateStatus(context.TODO(), newNetPol, metav1.UpdateOptions{}) + _, err = npClient.UpdateStatus(ctx, newNetPol, metav1.UpdateOptions{}) framework.ExpectError(err, "request template:%v", newNetPol) ginkgo.By("deleting all test collection") - err = npClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + err = npClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) - nps, err := npClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) + nps, err := npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(nps.Items), 0, "filtered list should be 0 items") }) diff --git a/test/e2e/network/netpol/test_helper.go b/test/e2e/network/netpol/test_helper.go index 74ecc0adf87..d41b7105167 100644 --- a/test/e2e/network/netpol/test_helper.go +++ b/test/e2e/network/netpol/test_helper.go @@ -42,26 +42,26 @@ func prettyPrint(policy *networkingv1.NetworkPolicy) string { } // CreatePolicy creates a policy in the given namespace -func CreatePolicy(k8s *kubeManager, policy *networkingv1.NetworkPolicy, namespace string) { +func CreatePolicy(ctx context.Context, k8s *kubeManager, policy *networkingv1.NetworkPolicy, namespace string) { if isVerbose { framework.Logf("****************************************************************") framework.Logf("Network Policy creating %s/%s \n%s", namespace, policy.Name, prettyPrint(policy)) framework.Logf("****************************************************************") } - _, err := k8s.createNetworkPolicy(namespace, policy) + _, err := k8s.createNetworkPolicy(ctx, namespace, policy) framework.ExpectNoError(err, "Unable to create netpol %s/%s", namespace, policy.Name) } // UpdatePolicy updates a networkpolicy -func UpdatePolicy(k8s *kubeManager, policy *networkingv1.NetworkPolicy, namespace string) { +func UpdatePolicy(ctx context.Context, k8s *kubeManager, policy *networkingv1.NetworkPolicy, namespace string) { if isVerbose { framework.Logf("****************************************************************") framework.Logf("Network Policy updating %s/%s \n%s", namespace, policy.Name, prettyPrint(policy)) 
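// Illustrative sketch (not part of this patch): for the Watch calls converted above in network_policy_api.go,
// opening the watch with ctx ties the stream to the spec; when ctx is cancelled the request should terminate and
// the result channel close, so a drain loop like the one below ends on its own. The function is an assumption
// made for illustration, not code from the test suite.
package netpolsketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// drainNetpolEvents prints NetworkPolicy watch events until ctx is cancelled
// (which closes ResultChan) or the stream otherwise ends.
func drainNetpolEvents(ctx context.Context, cs kubernetes.Interface, ns string) error {
	w, err := cs.NetworkingV1().NetworkPolicies(ns).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop() // harmless even when ctx cancellation has already ended the stream
	for ev := range w.ResultChan() {
		fmt.Printf("netpol event: %s\n", ev.Type)
	}
	return nil
}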
framework.Logf("****************************************************************") } - _, err := k8s.updateNetworkPolicy(namespace, policy) + _, err := k8s.updateNetworkPolicy(ctx, namespace, policy) framework.ExpectNoError(err, "Unable to update netpol %s/%s", namespace, policy.Name) } @@ -132,30 +132,30 @@ func ValidateOrFail(k8s *kubeManager, testCase *TestCase) { } // AddNamespaceLabels adds a new label to a namespace -func AddNamespaceLabel(k8s *kubeManager, name string, key string, val string) { - ns, err := k8s.getNamespace(name) +func AddNamespaceLabel(ctx context.Context, k8s *kubeManager, name string, key string, val string) { + ns, err := k8s.getNamespace(ctx, name) framework.ExpectNoError(err, "Unable to get namespace %s", name) ns.Labels[key] = val - _, err = k8s.clientSet.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) + _, err = k8s.clientSet.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Unable to update namespace %s", name) } // DeleteNamespaceLabel deletes a label from a namespace (if present) -func DeleteNamespaceLabel(k8s *kubeManager, name string, key string) { - ns, err := k8s.getNamespace(name) +func DeleteNamespaceLabel(ctx context.Context, k8s *kubeManager, name string, key string) { + ns, err := k8s.getNamespace(ctx, name) framework.ExpectNoError(err, "Unable to get namespace %s", name) if _, ok := ns.Labels[key]; !ok { // nothing to do if the label is not present return } delete(ns.Labels, key) - _, err = k8s.clientSet.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) + _, err = k8s.clientSet.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Unable to update namespace %s", name) } // AddPodLabels adds new labels to a running pod -func AddPodLabels(k8s *kubeManager, namespace string, name string, newPodLabels map[string]string) { - kubePod, err := k8s.clientSet.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func AddPodLabels(ctx context.Context, k8s *kubeManager, namespace string, name string, newPodLabels map[string]string) { + kubePod, err := k8s.clientSet.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s/%s", namespace, name) if kubePod.Labels == nil { kubePod.Labels = map[string]string{} @@ -163,11 +163,11 @@ func AddPodLabels(k8s *kubeManager, namespace string, name string, newPodLabels for key, val := range newPodLabels { kubePod.Labels[key] = val } - _, err = k8s.clientSet.CoreV1().Pods(namespace).Update(context.TODO(), kubePod, metav1.UpdateOptions{}) + _, err = k8s.clientSet.CoreV1().Pods(namespace).Update(ctx, kubePod, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Unable to add pod %s/%s labels", namespace, name) err = wait.PollImmediate(waitInterval, waitTimeout, func() (done bool, err error) { - waitForPod, err := k8s.getPod(namespace, name) + waitForPod, err := k8s.getPod(ctx, namespace, name) if err != nil { return false, err } @@ -182,18 +182,18 @@ func AddPodLabels(k8s *kubeManager, namespace string, name string, newPodLabels } // ResetPodLabels resets the labels for a deployment's template -func ResetPodLabels(k8s *kubeManager, namespace string, name string) { - kubePod, err := k8s.clientSet.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func ResetPodLabels(ctx context.Context, k8s *kubeManager, namespace string, name string) { + kubePod, err := k8s.clientSet.CoreV1().Pods(namespace).Get(ctx, 
name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s/%s", namespace, name) labels := map[string]string{ podNameLabelKey(): name, } kubePod.Labels = labels - _, err = k8s.clientSet.CoreV1().Pods(namespace).Update(context.TODO(), kubePod, metav1.UpdateOptions{}) + _, err = k8s.clientSet.CoreV1().Pods(namespace).Update(ctx, kubePod, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Unable to add pod %s/%s labels", namespace, name) err = wait.PollImmediate(waitInterval, waitTimeout, func() (done bool, err error) { - waitForPod, err := k8s.getPod(namespace, name) + waitForPod, err := k8s.getPod(ctx, namespace, name) if err != nil { return false, nil } diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go index a280270d1bd..13eede1f4a3 100644 --- a/test/e2e/network/network_tiers.go +++ b/test/e2e/network/network_tiers.go @@ -53,32 +53,32 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() { cs = f.ClientSet }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() { DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { framework.Logf("cleaning gce resource for %s", lb) - framework.TestContext.CloudConfig.Provider.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) + framework.TestContext.CloudConfig.Provider.CleanupServiceResources(ctx, cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) } //reset serviceLBNames serviceLBNames = []string{} }) ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func(ctx context.Context) { lagTimeout := e2eservice.LoadBalancerLagTimeoutDefault - createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) + createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) svcName := "net-tiers-svc" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, svcName) ginkgo.By("creating a pod to be part of the service " + svcName) - _, err := jig.Run(nil) + _, err := jig.Run(ctx, nil) framework.ExpectNoError(err) // Test 1: create a standard tiered LB for the Service. ginkgo.By("creating a Service of type LoadBalancer using the standard network tier") - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard)) }) @@ -91,11 +91,11 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) // Wait and verify the LB. - ingressIP := waitAndVerifyLBWithTier(jig, "", createTimeout, lagTimeout) + ingressIP := waitAndVerifyLBWithTier(ctx, jig, "", createTimeout, lagTimeout) // Test 2: re-create a LB of a different tier for the updated Service. ginkgo.By("updating the Service to use the premium (default) tier") - svc, err = jig.UpdateService(func(svc *v1.Service) { + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationPremium)) }) framework.ExpectNoError(err) @@ -106,7 +106,7 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() { // Wait until the ingress IP changes. Each tier has its own pool of // IPs, so changing tiers implies changing IPs. 
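// Illustrative sketch (not part of this patch): the AddPodLabels and ResetPodLabels helpers above still poll with
// wait.PollImmediate, whose condition closure only captures ctx; the poll loop itself keeps ticking until its own
// timeout even if the spec has been aborted. A context-aware variant could look like the function below; the
// getPod parameter mirrors the helper used above and is an assumption made for the example.
package netpolsketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForPodLabels stops polling as soon as ctx is cancelled, in addition to
// honoring the usual interval and timeout.
func waitForPodLabels(ctx context.Context, getPod func(ctx context.Context, ns, name string) (*v1.Pod, error),
	ns, name string, want map[string]string, interval, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
		pod, err := getPod(ctx, ns, name)
		if err != nil {
			return false, nil // treat lookup errors as retriable, as ResetPodLabels does
		}
		for k, v := range want {
			if pod.Labels[k] != v {
				return false, nil
			}
		}
		return true, nil
	})
}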
- ingressIP = waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout) + ingressIP = waitAndVerifyLBWithTier(ctx, jig, ingressIP, createTimeout, lagTimeout) // Test 3: create a standard-tierd LB with a user-requested IP. ginkgo.By("reserving a static IP for the load balancer") @@ -127,7 +127,7 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() { framework.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP) ginkgo.By("updating the Service to use the standard tier with a requested IP") - svc, err = jig.UpdateService(func(svc *v1.Service) { + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.LoadBalancerIP = requestedIP setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard)) }) @@ -139,14 +139,14 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() { framework.ExpectEqual(svcTier, cloud.NetworkTierStandard) // Wait until the ingress IP changes and verifies the LB. - waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout) + waitAndVerifyLBWithTier(ctx, jig, ingressIP, createTimeout, lagTimeout) }) }) -func waitAndVerifyLBWithTier(jig *e2eservice.TestJig, existingIP string, waitTimeout, checkTimeout time.Duration) string { +func waitAndVerifyLBWithTier(ctx context.Context, jig *e2eservice.TestJig, existingIP string, waitTimeout, checkTimeout time.Duration) string { // If existingIP is "" this will wait for any ingress IP to show up. Otherwise // it will wait for the ingress IP to change to something different. - svc, err := jig.WaitForNewIngressIP(existingIP, waitTimeout) + svc, err := jig.WaitForNewIngressIP(ctx, existingIP, waitTimeout) framework.ExpectNoError(err) svcPort := int(svc.Spec.Ports[0].Port) @@ -161,7 +161,7 @@ func waitAndVerifyLBWithTier(jig *e2eservice.TestJig, existingIP string, waitTim // If the IP has been used by previous test, sometimes we get the lingering // 404 errors even after the LB is long gone. Tolerate and retry until the // new LB is fully established. - e2eservice.TestReachableHTTPWithRetriableErrorCodes(ingressIP, svcPort, []int{http.StatusNotFound}, checkTimeout) + e2eservice.TestReachableHTTPWithRetriableErrorCodes(ctx, ingressIP, svcPort, []int{http.StatusNotFound}, checkTimeout) // Verify the network tier matches the desired. svcNetTier, err := gcecloud.GetServiceNetworkTier(svc) diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index fac8c030fb7..9c064608432 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -44,7 +44,7 @@ import ( // host. An error will be returned if the host is not reachable from the pod. // // An empty nodeName will use the schedule to choose where the pod is executed. 
-func checkConnectivityToHost(f *framework.Framework, nodeName, podName, host string, port, timeout int) error { +func checkConnectivityToHost(ctx context.Context, f *framework.Framework, nodeName, podName, host string, port, timeout int) error { command := []string{ "nc", "-vz", @@ -61,14 +61,14 @@ func checkConnectivityToHost(f *framework.Framework, nodeName, podName, host str pod.Spec.RestartPolicy = v1.RestartPolicyNever podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := podClient.Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return err } - err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name) if err != nil { - logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + logs, logErr := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) if logErr != nil { framework.Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr) } else { @@ -87,7 +87,7 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func(ctx context.Context) { ginkgo.By("Running container which tries to connect to 8.8.8.8") framework.ExpectNoError( - checkConnectivityToHost(f, "", "connectivity-test", "8.8.8.8", 53, 30)) + checkConnectivityToHost(ctx, f, "", "connectivity-test", "8.8.8.8", 53, 30)) }) ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental][LinuxOnly]", func(ctx context.Context) { @@ -95,13 +95,13 @@ var _ = common.SIGDescribe("Networking", func() { e2eskipper.SkipIfNodeOSDistroIs("windows") ginkgo.By("Running container which tries to connect to 2001:4860:4860::8888") framework.ExpectNoError( - checkConnectivityToHost(f, "", "connectivity-test", "2001:4860:4860::8888", 53, 30)) + checkConnectivityToHost(ctx, f, "", "connectivity-test", "2001:4860:4860::8888", 53, 30)) }) ginkgo.It("should provider Internet connection for containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) { ginkgo.By("Running container which tries to connect to google.com") framework.ExpectNoError( - checkConnectivityToHost(f, "", "connectivity-test", "google.com", 80, 30)) + checkConnectivityToHost(ctx, f, "", "connectivity-test", "google.com", 80, 30)) }) // First test because it has no dependencies on variables created later on. @@ -124,7 +124,7 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.By(fmt.Sprintf("testing: %s", test.path)) data, err := f.ClientSet.CoreV1().RESTClient().Get(). AbsPath(test.path). - DoRaw(context.TODO()) + DoRaw(ctx) if err != nil { framework.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data)) } @@ -134,85 +134,85 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.It("should check kube-proxy urls", func(ctx context.Context) { // TODO: this is overkill we just need the host networking pod // to hit kube-proxy urls. - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork) ginkgo.By("checking kube-proxy URLs") - config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK") + config.GetSelfURL(ctx, ports.ProxyHealthzPort, "/healthz", "200 OK") // Verify /healthz returns the proper content. 
- config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "lastUpdated") + config.GetSelfURL(ctx, ports.ProxyHealthzPort, "/healthz", "lastUpdated") // Verify /proxyMode returns http status code 200. - config.GetSelfURLStatusCode(ports.ProxyStatusPort, "/proxyMode", "200") + config.GetSelfURLStatusCode(ctx, ports.ProxyStatusPort, "/proxyMode", "200") }) ginkgo.Describe("Granular Checks: Services", func() { ginkgo.It("should function for pod-Service: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort)) - err = config.DialFromTestContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for pod-Service: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) - err := config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort)) - err = config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterSCTPPort)) - err := config.DialFromTestContainer("sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, 
config.NodeIP, config.NodeSCTPPort)) - err = config.DialFromTestContainer("sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for node-Service: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromNode("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromNode(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) - err = config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for node-Service: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterUDPPort)) - err := config.DialFromNode("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromNode(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) - err = config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -220,58 +220,58 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.It("should function for node-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) { ginkgo.Skip("Skipping SCTP node to service test until DialFromNode supports SCTP #96482") - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterSCTPPort)) - err := config.DialFromNode("sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromNode(ctx, "sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", 
err) } ginkgo.By(fmt.Sprintf("dialing(sctp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeSCTPPort)) - err = config.DialFromNode("sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for endpoint-Service: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromEndpointContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromEndpointContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort)) - err = config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for endpoint-Service: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) - err := config.DialFromEndpointContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromEndpointContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort)) - err = config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should function for endpoint-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterSCTPPort)) - err := config.DialFromEndpointContainer("sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromEndpointContainer(ctx, "sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { 
framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(sctp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeSCTPPort)) - err = config.DialFromEndpointContainer("sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -280,79 +280,79 @@ var _ = common.SIGDescribe("Networking", func() { // This test ensures that in a situation where multiple services exist with the same selector, // deleting one of the services does not affect the connectivity of the remaining service ginkgo.It("should function for multiple endpoint-Services with same selector", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By("creating a second service with same selector") - svc2, httpPort := createSecondNodePortService(f, config) + svc2, httpPort := createSecondNodePortService(ctx, f, config) // original service should work ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromEndpointContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromEndpointContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort)) - err = config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } // Dial second service ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (svc2.clusterIP)", config.EndpointPods[0].Name, svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort)) - err = config.DialFromEndpointContainer("http", svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "http", svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, httpPort)) - err = config.DialFromEndpointContainer("http", config.NodeIP, httpPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "http", config.NodeIP, httpPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By("deleting the original node port service") - config.DeleteNodePortService() + config.DeleteNodePortService(ctx) // Second service should continue to function unaffected ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (svc2.clusterIP)", config.EndpointPods[0].Name, svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort)) - err 
= config.DialFromEndpointContainer("http", svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "http", svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, httpPort)) - err = config.DialFromEndpointContainer("http", config.NodeIP, httpPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromEndpointContainer(ctx, "http", config.NodeIP, httpPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should update endpoints: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint (initial), %v", err) } ginkgo.By("Deleting a pod which, will be replaced with a new endpoint") - config.DeleteNetProxyPod() + config.DeleteNetProxyPod(ctx) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP) (endpoint recovery)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) - err = config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint (recovery), %v", err) } }) ginkgo.It("should update endpoints: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) - err := config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint (initial), %v", err) } ginkgo.By("Deleting a pod which, will be replaced with a new endpoint") - config.DeleteNetProxyPod() + config.DeleteNetProxyPod(ctx) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP) (endpoint recovery)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) - err = config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint (recovery), 
%v", err) } @@ -360,20 +360,20 @@ var _ = common.SIGDescribe("Networking", func() { // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. ginkgo.It("should update nodePort: http [Slow]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) - ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) - err := config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (ctx, nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) + err := config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("Error dialing http from node: %v", err) } ginkgo.By("Deleting the node port access point") - config.DeleteNodePortService() + config.DeleteNodePortService(ctx) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP) and getting ZERO host endpoints", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) // #106770 MaxTries can be very large on large clusters, with the risk that a new NodePort is created by another test and start to answer traffic. // Since we only want to assert that traffic is not being forwarded anymore and the retry timeout is 2 seconds, consider the test is correct // if the service doesn't answer after 10 tries. - err = config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, 10, 10, sets.NewString()) + err = config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, 10, 10, sets.NewString()) if err != nil { framework.Failf("Failure validating that node port service STOPPED removed properly: %v", err) } @@ -381,9 +381,9 @@ var _ = common.SIGDescribe("Networking", func() { // quick validation of udp, next test confirms that this services update as well after endpoints are removed, but is slower. ginkgo.It("should support basic nodePort: udp functionality", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort)) - err := config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("Failure validating that nodePort service WAS forwarding properly: %v", err) } @@ -391,21 +391,21 @@ var _ = common.SIGDescribe("Networking", func() { // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. 
ginkgo.It("should update nodePort: udp [Slow]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort)) - err := config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("Failure validating that nodePort service WAS forwarding properly: %v", err) } ginkgo.By("Deleting the node port access point") - config.DeleteNodePortService() + config.DeleteNodePortService(ctx) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ZERO host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort)) // #106770 MaxTries can be very large on large clusters, with the risk that a new NodePort is created by another test and start to answer traffic. // Since we only want to assert that traffic is not being forwarded anymore and the retry timeout is 2 seconds, consider the test is correct // if the service doesn't answer after 10 tries. - err = config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, 10, 10, sets.NewString()) + err = config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, 10, 10, sets.NewString()) if err != nil { framework.Failf("Failure validating that node port service STOPPED removed properly: %v", err) } @@ -413,11 +413,11 @@ var _ = common.SIGDescribe("Networking", func() { // [LinuxOnly]: Windows does not support session affinity. ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort)) // Check if number of endpoints returned are exactly one. - eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, e2enetwork.SessionAffinityChecks) + eps, err := config.GetEndpointsFromTestContainer(ctx, "http", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, e2enetwork.SessionAffinityChecks) if err != nil { framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) } @@ -431,11 +431,11 @@ var _ = common.SIGDescribe("Networking", func() { // [LinuxOnly]: Windows does not support session affinity. ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort)) // Check if number of endpoints returned are exactly one. 
- eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort, e2enetwork.SessionAffinityChecks) + eps, err := config.GetEndpointsFromTestContainer(ctx, "udp", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort, e2enetwork.SessionAffinityChecks) if err != nil { framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) } @@ -448,20 +448,20 @@ var _ = common.SIGDescribe("Networking", func() { }) ginkgo.It("should be able to handle large requests: http", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) message := strings.Repeat("42", 1000) - err := config.DialEchoFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) + err := config.DialEchoFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } }) ginkgo.It("should be able to handle large requests: udp", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f) + config := e2enetwork.NewNetworkingTestConfig(ctx, f) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) message := "n" + strings.Repeat("o", 1999) - err := config.DialEchoFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message) + err := config.DialEchoFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -471,17 +471,17 @@ var _ = common.SIGDescribe("Networking", func() { // because the pods will try to acquire the same port in the host. // We run the test in serial, to avoid port conflicts. 
ginkgo.It("should function for service endpoints using hostNetwork", func(ctx context.Context) { - config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork, e2enetwork.EndpointsUseHostNetwork) + config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork, e2enetwork.EndpointsUseHostNetwork) ginkgo.By("pod-Service(hostNetwork): http") ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) - err := config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err := config.DialFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort)) - err = config.DialFromTestContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -489,13 +489,13 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.By("pod-Service(hostNetwork): udp") ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) - err = config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort)) - err = config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromTestContainer(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -503,24 +503,24 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.By("node-Service(hostNetwork): http") ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterHTTPPort)) - err = config.DialFromNode("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) - err = config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By("node-Service(hostNetwork): udp") ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", 
config.NodeIP, config.ClusterIP, e2enetwork.ClusterUDPPort)) - err = config.DialFromNode("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) - err = config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) + err = config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -529,7 +529,7 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) message := strings.Repeat("42", 1000) - err = config.DialEchoFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) + err = config.DialEchoFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -538,7 +538,7 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) message = "n" + strings.Repeat("o", 1999) - err = config.DialEchoFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message) + err = config.DialEchoFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message) if err != nil { framework.Failf("failed dialing endpoint, %v", err) } @@ -551,7 +551,7 @@ var _ = common.SIGDescribe("Networking", func() { e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) e2eskipper.SkipUnlessSSHKeyPresent() - hosts, err := e2essh.NodeSSHHosts(f.ClientSet) + hosts, err := e2essh.NodeSSHHosts(ctx, f.ClientSet) framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") @@ -563,7 +563,7 @@ var _ = common.SIGDescribe("Networking", func() { svc := "iptables-flush-test" ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc) - podNames, svcIP, err := StartServeHostnameService(f.ClientSet, getServeHostnameService(svc), ns, numPods) + podNames, svcIP, err := StartServeHostnameService(ctx, f.ClientSet, getServeHostnameService(svc), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc, ns) // Ideally we want to reload the system firewall, but we don't necessarily @@ -572,7 +572,7 @@ var _ = common.SIGDescribe("Networking", func() { // chains. 
ginkgo.By("dumping iptables rules on node " + host) - result, err := e2essh.SSH("sudo iptables-save", host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, "sudo iptables-save", host, framework.TestContext.Provider) e2essh.LogResult(result) if err != nil || result.Code != 0 { framework.Failf("couldn't dump iptable rules: %v", err) @@ -604,18 +604,18 @@ var _ = common.SIGDescribe("Networking", func() { cmd := strings.Join(append(deleteRuleCmds, deleteChainCmds...), "\n") ginkgo.By("deleting all KUBE-* iptables chains") - result, err = e2essh.SSH(cmd, host, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) framework.Failf("couldn't delete iptable rules: %v", err) } ginkgo.By("verifying that kube-proxy rules are eventually recreated") - framework.ExpectNoError(verifyServeHostnameServiceUp(f.ClientSet, ns, podNames, svcIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, f.ClientSet, ns, podNames, svcIP, servicePort)) ginkgo.By("verifying that kubelet rules are eventually recreated") err = utilwait.PollImmediate(framework.Poll, framework.RestartNodeReadyAgainTimeout, func() (bool, error) { - result, err = e2essh.SSH("sudo iptables-save -t mangle", host, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, "sudo iptables-save -t mangle", host, framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) return false, err @@ -635,7 +635,7 @@ var _ = common.SIGDescribe("Networking", func() { // This is [Serial] because it can't run at the same time as the // [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded. ginkgo.It("should allow creating a Pod with an SCTP HostPort [LinuxOnly] [Serial]", func(ctx context.Context) { - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) hostExec := utils.NewHostExec(f) ginkgo.DeferCleanup(hostExec.Cleanup) @@ -643,7 +643,7 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.By("getting the state of the sctp module on the selected node") nodes := &v1.NodeList{} nodes.Items = append(nodes.Items, *node) - sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes) + sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes) ginkgo.By("creating a pod with hostport on the selected node") podName := "hostport" @@ -653,13 +653,13 @@ var _ = common.SIGDescribe("Networking", func() { e2epod.SetNodeSelection(&podSpec.Spec, nodeSelection) ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name)) - e2epod.NewPodClient(f).CreateSync(podSpec) + e2epod.NewPodClient(f).CreateSync(ctx, podSpec) ginkgo.DeferCleanup(func(ctx context.Context) { err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name) }) ginkgo.By("validating sctp module is still not loaded") - sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes) + sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes) if !sctpLoadedAtStart && sctpLoadedAtEnd { framework.Failf("The state of the sctp module has changed due to the test case") } diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index d944527f93a..905f63d60f4 100644 --- a/test/e2e/network/networking_perf.go +++ 
b/test/e2e/network/networking_perf.go @@ -57,7 +57,7 @@ const ( serverServiceName = "iperf2-server" ) -func iperf2ServerDeployment(client clientset.Interface, namespace string, isIPV6 bool) (*appsv1.Deployment, error) { +func iperf2ServerDeployment(ctx context.Context, client clientset.Interface, namespace string, isIPV6 bool) (*appsv1.Deployment, error) { framework.Logf("deploying iperf2 server") one := int64(1) replicas := int32(1) @@ -83,7 +83,7 @@ func iperf2ServerDeployment(client clientset.Interface, namespace string, isIPV6 }, } - deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) } @@ -96,7 +96,7 @@ func iperf2ServerDeployment(client clientset.Interface, namespace string, isIPV6 return deployment, nil } -func iperf2ServerService(client clientset.Interface, namespace string) (*v1.Service, error) { +func iperf2ServerService(ctx context.Context, client clientset.Interface, namespace string) (*v1.Service, error) { service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{Name: serverServiceName}, Spec: v1.ServiceSpec{ @@ -108,16 +108,16 @@ func iperf2ServerService(client clientset.Interface, namespace string) (*v1.Serv }, }, } - return client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) + return client.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{}) } -func iperf2ClientDaemonSet(client clientset.Interface, namespace string) (*appsv1.DaemonSet, error) { +func iperf2ClientDaemonSet(ctx context.Context, client clientset.Interface, namespace string) (*appsv1.DaemonSet, error) { one := int64(1) labels := map[string]string{labelKey: clientLabelValue} spec := e2edaemonset.NewDaemonSet("iperf2-clients", imageutils.GetE2EImage(imageutils.Agnhost), labels, nil, nil, nil) spec.Spec.Template.Spec.TerminationGracePeriodSeconds = &one - ds, err := client.AppsV1().DaemonSets(namespace).Create(context.TODO(), spec, metav1.CreateOptions{}) + ds, err := client.AppsV1().DaemonSets(namespace).Create(ctx, spec, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("daemonset %s Create API error: %v", spec.Name, err) } @@ -142,8 +142,8 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", f := framework.NewDefaultFramework("network-perf") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It(fmt.Sprintf("should run iperf2"), func(ctx context.Context) { - readySchedulableNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + ginkgo.It("should run iperf2", func(ctx context.Context) { + readySchedulableNodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) familyStr := "" @@ -156,22 +156,22 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", } // Step 1: set up iperf2 server -- a single pod on any node - _, err = iperf2ServerDeployment(f.ClientSet, f.Namespace.Name, framework.TestContext.ClusterIsIPv6()) + _, err = iperf2ServerDeployment(ctx, f.ClientSet, f.Namespace.Name, framework.TestContext.ClusterIsIPv6()) framework.ExpectNoError(err, "deploy iperf2 server deployment") - _, err = iperf2ServerService(f.ClientSet, f.Namespace.Name) + _, err = iperf2ServerService(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err, 
"deploy iperf2 server service") // Step 2: set up iperf2 client daemonset // initially, the clients don't do anything -- they simply pause until they're called - _, err = iperf2ClientDaemonSet(f.ClientSet, f.Namespace.Name) + _, err = iperf2ClientDaemonSet(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err, "deploy iperf2 client daemonset") // Make sure the server is ready to go framework.Logf("waiting for iperf2 server endpoints") err = wait.Poll(2*time.Second, largeClusterTimeout, func() (done bool, err error) { listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, serverServiceName)} - esList, err := f.ClientSet.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), listOptions) + esList, err := f.ClientSet.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(ctx, listOptions) framework.ExpectNoError(err, "Error fetching EndpointSlice for Service %s/%s", f.Namespace.Name, serverServiceName) if len(esList.Items) == 0 { @@ -190,7 +190,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", framework.Logf("waiting for client pods to be running") var clientPodList *v1.PodList err = wait.Poll(2*time.Second, largeClusterTimeout, func() (done bool, err error) { - clientPodList, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), clientPodsListOptions) + clientPodList, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, clientPodsListOptions) if err != nil { return false, err } @@ -208,7 +208,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", framework.Logf("all client pods are ready: %d pods", len(clientPodList.Items)) // Get a reference to the server pod for later - serverPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), serverPodsListOptions) + serverPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, serverPodsListOptions) framework.ExpectNoError(err) if len(serverPodList.Items) != 1 { framework.Failf("expected 1 server pod, found %d", len(serverPodList.Items)) @@ -235,7 +235,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", podName := pod.Name nodeName := pod.Spec.NodeName - iperfVersion := e2epod.ExecShellInPod(f, podName, "iperf -v || true") + iperfVersion := e2epod.ExecShellInPod(ctx, f, podName, "iperf -v || true") framework.Logf("iperf version: %s", iperfVersion) for try := 0; ; try++ { @@ -248,7 +248,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", */ command := fmt.Sprintf(`iperf %s -e -p %d --reportstyle C -i 1 -c %s && sleep 5`, familyStr, iperf2Port, serverServiceName) framework.Logf("attempting to run command '%s' in client pod %s (node %s)", command, podName, nodeName) - output := e2epod.ExecShellInPod(f, podName, command) + output := e2epod.ExecShellInPod(ctx, f, podName, command) framework.Logf("output from exec on client pod %s (node %s): \n%s\n", podName, nodeName, output) results, err := ParseIPerf2EnhancedResultsFromCSV(output) diff --git a/test/e2e/network/no_snat.go b/test/e2e/network/no_snat.go index 6d05bd7b079..613b5c57e3f 100644 --- a/test/e2e/network/no_snat.go +++ b/test/e2e/network/no_snat.go @@ -70,7 +70,7 @@ var _ = common.SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { pc := cs.CoreV1().Pods(f.Namespace.Name) ginkgo.By("creating a test pod on each Node") - nodes, err := e2enode.GetReadySchedulableNodes(cs) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) 
framework.ExpectNoError(err) framework.ExpectNotEqual(len(nodes.Items), 0, "no Nodes in the cluster") @@ -78,13 +78,13 @@ var _ = common.SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { // target Pod at Node nodeSelection := e2epod.NodeSelection{Name: node.Name} e2epod.SetNodeSelection(&testPod.Spec, nodeSelection) - _, err = pc.Create(context.TODO(), &testPod, metav1.CreateOptions{}) + _, err = pc.Create(ctx, &testPod, metav1.CreateOptions{}) framework.ExpectNoError(err) } ginkgo.By("waiting for all of the no-snat-test pods to be scheduled and running") err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { - pods, err := pc.List(context.TODO(), metav1.ListOptions{LabelSelector: noSNATTestName}) + pods, err := pc.List(ctx, metav1.ListOptions{LabelSelector: noSNATTestName}) if err != nil { return false, err } @@ -103,7 +103,7 @@ var _ = common.SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { framework.ExpectNoError(err) ginkgo.By("sending traffic from each pod to the others and checking that SNAT does not occur") - pods, err := pc.List(context.TODO(), metav1.ListOptions{LabelSelector: noSNATTestName}) + pods, err := pc.List(ctx, metav1.ListOptions{LabelSelector: noSNATTestName}) framework.ExpectNoError(err) // hit the /clientip endpoint on every other Pods to check if source ip is preserved diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index 40b59eb1a65..eddac45c71d 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -83,13 +83,13 @@ var _ = common.SIGDescribe("Proxy", func() { Test for Proxy, logs port endpoint Select any node in the cluster to invoke /proxy/nodes/:10250/logs endpoint. This endpoint MUST be reachable. */ - ginkgo.It("should proxy logs on node with explicit kubelet port using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") }) + ginkgo.It("should proxy logs on node with explicit kubelet port using proxy subresource ", func(ctx context.Context) { nodeProxyTest(ctx, f, prefix+"/nodes/", ":10250/proxy/logs/") }) /* Test for Proxy, logs endpoint Select any node in the cluster to invoke /proxy/nodes///logs endpoint. This endpoint MUST be reachable. */ - ginkgo.It("should proxy logs on node using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") }) + ginkgo.It("should proxy logs on node using proxy subresource ", func(ctx context.Context) { nodeProxyTest(ctx, f, prefix+"/nodes/", "/proxy/logs/") }) // using the porter image to serve content, access the content // (of multiple pods?) from multiple (endpoints/services?) 
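The IPerf2 and NoSNAT hunks above keep wait.Poll/wait.PollImmediate and only thread ctx into the API calls inside the polling closures. A minimal sketch of a fully context-aware variant, assuming wait.PollImmediateWithContext from k8s.io/apimachinery/pkg/util/wait and reusing the pc and noSNATTestName names from the NoSNAT test; the patch itself does not make this change:

	// Sketch only: with PollImmediateWithContext the poll loop itself also stops
	// as soon as ctx is canceled, in addition to the in-flight List call returning early.
	// Assumes: wait "k8s.io/apimachinery/pkg/util/wait", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1",
	// v1 "k8s.io/api/core/v1".
	err := wait.PollImmediateWithContext(ctx, 10*time.Second, 1*time.Minute, func(ctx context.Context) (bool, error) {
		pods, err := pc.List(ctx, metav1.ListOptions{LabelSelector: noSNATTestName})
		if err != nil {
			return false, err
		}
		for _, pod := range pods.Items {
			if pod.Status.Phase != v1.PodRunning {
				return false, nil // not all pods are running yet, keep polling
			}
		}
		return true, nil
	})
	framework.ExpectNoError(err)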
@@ -101,7 +101,7 @@ var _ = common.SIGDescribe("Proxy", func() { framework.ConformanceIt("should proxy through a service and a pod ", func(ctx context.Context) { start := time.Now() labels := map[string]string{"proxy-service-target": "true"} - service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), &v1.Service{ + service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "proxy-service-", }, @@ -176,11 +176,11 @@ var _ = common.SIGDescribe("Proxy", func() { Labels: labels, CreatedPods: &pods, } - err = e2erc.RunRC(cfg) + err = e2erc.RunRC(ctx, cfg) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, cfg.Name) - err = waitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name) + err = waitForEndpoint(ctx, f.ClientSet, f.Namespace.Name, service.Name) framework.ExpectNoError(err) // table constructors @@ -238,7 +238,7 @@ var _ = common.SIGDescribe("Proxy", func() { go func(i int, path, val string) { defer wg.Done() // this runs the test case - body, status, d, err := doProxy(f, path, i) + body, status, d, err := doProxy(ctx, f, path, i) if err != nil { if serr, ok := err.(*apierrors.StatusError); ok { @@ -264,7 +264,7 @@ var _ = common.SIGDescribe("Proxy", func() { } if len(errs) != 0 { - body, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw() + body, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do(ctx).Raw() if err != nil { framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err) } else { @@ -310,12 +310,12 @@ var _ = common.SIGDescribe("Proxy", func() { }}, RestartPolicy: v1.RestartPolicyNever, }} - _, err := f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "Pod didn't start within time out period") + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod), "Pod didn't start within time out period") framework.Logf("Creating service...") - _, err = f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ + _, err = f.ClientSet.CoreV1().Services(ns).Create(ctx, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: testSvcName, Namespace: ns, @@ -404,12 +404,12 @@ var _ = common.SIGDescribe("Proxy", func() { }}, RestartPolicy: v1.RestartPolicyNever, }} - _, err := f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "Pod didn't start within time out period") + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod), "Pod didn't start within time out period") framework.Logf("Creating service...") - _, err = f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ + _, err = f.ClientSet.CoreV1().Services(ns).Create(ctx, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: testSvcName, Namespace: ns, @@ -544,7 +544,7 @@ func validateProxyVerbRequest(client *http.Client, urlString string, httpVerb st } } -func doProxy(f *framework.Framework, path 
string, i int) (body []byte, statusCode int, d time.Duration, err error) { +func doProxy(ctx context.Context, f *framework.Framework, path string, i int) (body []byte, statusCode int, d time.Duration, err error) { // About all of the proxy accesses in this file: // * AbsPath is used because it preserves the trailing '/'. // * Do().Raw() is used (instead of DoRaw()) because it will turn an @@ -552,7 +552,7 @@ func doProxy(f *framework.Framework, path string, i int) (body []byte, statusCod // chance of the things we are talking to being confused for an error // that apiserver would have emitted. start := time.Now() - body, err = f.ClientSet.CoreV1().RESTClient().Get().AbsPath(path).Do(context.TODO()).StatusCode(&statusCode).Raw() + body, err = f.ClientSet.CoreV1().RESTClient().Get().AbsPath(path).Do(ctx).StatusCode(&statusCode).Raw() d = time.Since(start) if len(body) > 0 { framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d) @@ -571,16 +571,16 @@ func truncate(b []byte, maxLen int) []byte { return b2 } -func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) { +func nodeProxyTest(ctx context.Context, f *framework.Framework, prefix, nodeDest string) { // TODO: investigate why it doesn't work on master Node. - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) // TODO: Change it to test whether all requests succeeded when requests // not reaching Kubelet issue is debugged. serviceUnavailableErrors := 0 for i := 0; i < proxyAttempts; i++ { - _, status, d, err := doProxy(f, prefix+node.Name+nodeDest, i) + _, status, d, err := doProxy(ctx, f, prefix+node.Name+nodeDest, i) if status == http.StatusServiceUnavailable { framework.Logf("ginkgo.Failed proxying node logs due to service unavailable: %v", err) time.Sleep(time.Second) @@ -599,11 +599,11 @@ func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) { } // waitForEndpoint waits for the specified endpoint to be ready. -func waitForEndpoint(c clientset.Interface, ns, name string) error { +func waitForEndpoint(ctx context.Context, c clientset.Interface, ns, name string) error { // registerTimeout is how long to wait for an endpoint to be registered. registerTimeout := time.Minute for t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) { - endpoint, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{}) + endpoint, err := c.CoreV1().Endpoints(ns).Get(ctx, name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("Endpoint %s/%s is not ready yet", ns, name) continue diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index d2b3cc6764e..88900053f93 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -110,7 +110,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra } // PrepareScaleTest prepares framework for ingress scale testing. 
-func (f *IngressScaleFramework) PrepareScaleTest() error { +func (f *IngressScaleFramework) PrepareScaleTest(ctx context.Context) error { f.Logger.Infof("Initializing ingress test suite and gce controller...") f.Jig = e2eingress.NewIngressTestJig(f.Clientset) f.Jig.Logger = f.Logger @@ -119,7 +119,7 @@ func (f *IngressScaleFramework) PrepareScaleTest() error { Client: f.Clientset, Cloud: f.CloudConfig, } - if err := f.GCEController.Init(); err != nil { + if err := f.GCEController.Init(ctx); err != nil { return fmt.Errorf("failed to initialize GCE controller: %v", err) } @@ -130,13 +130,13 @@ func (f *IngressScaleFramework) PrepareScaleTest() error { } // CleanupScaleTest cleans up framework for ingress scale testing. -func (f *IngressScaleFramework) CleanupScaleTest() []error { +func (f *IngressScaleFramework) CleanupScaleTest(ctx context.Context) []error { var errs []error f.Logger.Infof("Cleaning up ingresses...") for _, ing := range f.ScaleTestIngs { if ing != nil { - if err := f.Clientset.NetworkingV1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, metav1.DeleteOptions{}); err != nil { + if err := f.Clientset.NetworkingV1().Ingresses(ing.Namespace).Delete(ctx, ing.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err)) } } @@ -144,20 +144,20 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error { f.Logger.Infof("Cleaning up services...") for _, svc := range f.ScaleTestSvcs { if svc != nil { - if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}); err != nil { + if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) } } } if f.ScaleTestDeploy != nil { f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name) - if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(context.TODO(), f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil { + if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(ctx, f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err)) } } f.Logger.Infof("Cleaning up cloud resources...") - if err := f.GCEController.CleanupIngressControllerWithTimeout(ingressesCleanupTimeout); err != nil { + if err := f.GCEController.CleanupIngressControllerWithTimeout(ctx, ingressesCleanupTimeout); err != nil { errs = append(errs, err) } @@ -165,12 +165,12 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error { } // RunScaleTest runs ingress scale testing. 
-func (f *IngressScaleFramework) RunScaleTest() []error { +func (f *IngressScaleFramework) RunScaleTest(ctx context.Context) []error { var errs []error testDeploy := generateScaleTestBackendDeploymentSpec(scaleTestNumBackends) f.Logger.Infof("Creating deployment %s...", testDeploy.Name) - testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(context.TODO(), testDeploy, metav1.CreateOptions{}) + testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(ctx, testDeploy, metav1.CreateOptions{}) if err != nil { errs = append(errs, fmt.Errorf("failed to create deployment %s: %v", testDeploy.Name, err)) return errs @@ -179,7 +179,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { if f.EnableTLS { f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName) - if err := f.Jig.PrepareTLSSecret(f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil { + if err := f.Jig.PrepareTLSSecret(ctx, f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil { errs = append(errs, fmt.Errorf("failed to prepare TLS secret %s: %v", scaleTestSecretName, err)) return errs } @@ -188,7 +188,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { // numIngsCreated keeps track of how many ingresses have been created. numIngsCreated := 0 - prepareIngsFunc := func(numIngsNeeded int) { + prepareIngsFunc := func(ctx context.Context, numIngsNeeded int) { var ingWg sync.WaitGroup numIngsToCreate := numIngsNeeded - numIngsCreated ingWg.Add(numIngsToCreate) @@ -203,7 +203,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { defer ingWg.Done() start := time.Now() - svcCreated, ingCreated, err := f.createScaleTestServiceIngress(suffix, f.EnableTLS) + svcCreated, ingCreated, err := f.createScaleTestServiceIngress(ctx, suffix, f.EnableTLS) svcQueue <- svcCreated ingQueue <- ingCreated if err != nil { @@ -211,7 +211,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { return } f.Logger.Infof("Waiting for ingress %s to come up...", ingCreated.Name) - if err := f.Jig.WaitForGivenIngressWithTimeout(ingCreated, false, waitForIngressMaxTimeout); err != nil { + if err := f.Jig.WaitForGivenIngressWithTimeout(ctx, ingCreated, false, waitForIngressMaxTimeout); err != nil { errQueue <- err return } @@ -251,10 +251,10 @@ func (f *IngressScaleFramework) RunScaleTest() []error { f.BatchDurations = append(f.BatchDurations, elapsed) } - measureCreateUpdateFunc := func() { + measureCreateUpdateFunc := func(ctx context.Context) { f.Logger.Infof("Create one more ingress and wait for it to come up") start := time.Now() - svcCreated, ingCreated, err := f.createScaleTestServiceIngress(fmt.Sprintf("%d", numIngsCreated), f.EnableTLS) + svcCreated, ingCreated, err := f.createScaleTestServiceIngress(ctx, fmt.Sprintf("%d", numIngsCreated), f.EnableTLS) numIngsCreated = numIngsCreated + 1 f.ScaleTestSvcs = append(f.ScaleTestSvcs, svcCreated) f.ScaleTestIngs = append(f.ScaleTestIngs, ingCreated) @@ -264,7 +264,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { } f.Logger.Infof("Waiting for ingress %s to come up...", ingCreated.Name) - if err := f.Jig.WaitForGivenIngressWithTimeout(ingCreated, false, waitForIngressMaxTimeout); err != nil { + if err := f.Jig.WaitForGivenIngressWithTimeout(ctx, ingCreated, false, waitForIngressMaxTimeout); err != nil { errs = append(errs, err) return } @@ -273,20 +273,20 @@ func (f *IngressScaleFramework) RunScaleTest() []error { f.StepCreateLatencies = append(f.StepCreateLatencies, elapsed) f.Logger.Infof("Updating ingress and 
wait for change to take effect") - ingToUpdate, err := f.Clientset.NetworkingV1().Ingresses(f.Namespace).Get(context.TODO(), ingCreated.Name, metav1.GetOptions{}) + ingToUpdate, err := f.Clientset.NetworkingV1().Ingresses(f.Namespace).Get(ctx, ingCreated.Name, metav1.GetOptions{}) if err != nil { errs = append(errs, err) return } addTestPathToIngress(ingToUpdate) start = time.Now() - ingToUpdate, err = f.Clientset.NetworkingV1().Ingresses(f.Namespace).Update(context.TODO(), ingToUpdate, metav1.UpdateOptions{}) + ingToUpdate, err = f.Clientset.NetworkingV1().Ingresses(f.Namespace).Update(ctx, ingToUpdate, metav1.UpdateOptions{}) if err != nil { errs = append(errs, err) return } - if err := f.Jig.WaitForGivenIngressWithTimeout(ingToUpdate, false, waitForIngressMaxTimeout); err != nil { + if err := f.Jig.WaitForGivenIngressWithTimeout(ctx, ingToUpdate, false, waitForIngressMaxTimeout); err != nil { errs = append(errs, err) return } @@ -299,9 +299,9 @@ func (f *IngressScaleFramework) RunScaleTest() []error { for _, num := range f.NumIngressesTest { f.Logger.Infof("Create more ingresses until we reach %d ingresses", num) - prepareIngsFunc(num) + prepareIngsFunc(ctx, num) f.Logger.Infof("Measure create and update latency with %d ingresses", num) - measureCreateUpdateFunc() + measureCreateUpdateFunc(ctx) if len(errs) != 0 { return errs @@ -371,12 +371,12 @@ func addTestPathToIngress(ing *networkingv1.Ingress) { }) } -func (f *IngressScaleFramework) createScaleTestServiceIngress(suffix string, enableTLS bool) (*v1.Service, *networkingv1.Ingress, error) { - svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(context.TODO(), generateScaleTestServiceSpec(suffix), metav1.CreateOptions{}) +func (f *IngressScaleFramework) createScaleTestServiceIngress(ctx context.Context, suffix string, enableTLS bool) (*v1.Service, *networkingv1.Ingress, error) { + svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(ctx, generateScaleTestServiceSpec(suffix), metav1.CreateOptions{}) if err != nil { return nil, nil, err } - ingCreated, err := f.Clientset.NetworkingV1().Ingresses(f.Namespace).Create(context.TODO(), generateScaleTestIngressSpec(suffix, enableTLS), metav1.CreateOptions{}) + ingCreated, err := f.Clientset.NetworkingV1().Ingresses(f.Namespace).Create(ctx, generateScaleTestIngressSpec(suffix, enableTLS), metav1.CreateOptions{}) if err != nil { return nil, nil, err } diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go index 6026de245e4..6d7bc0e8691 100644 --- a/test/e2e/network/scale/localrun/ingress_scale.go +++ b/test/e2e/network/scale/localrun/ingress_scale.go @@ -166,23 +166,26 @@ func main() { f.NumIngressesTest = numIngressesTest } + // This could be used to set a deadline. + ctx := context.Background() + // Real test begins. 
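// A minimal sketch of actually setting a deadline here, assuming a
// caller-chosen duration scaleTestTimeout (hypothetical; this file does not
// define such a flag):
//
//	ctx, cancel := context.WithTimeout(context.Background(), scaleTestTimeout)
//	defer cancel()
//
// With a deadline in place, the PrepareScaleTest, RunScaleTest and
// CleanupScaleTest calls below return early once it expires, since they now
// pass ctx to every API call.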
if cleanup { defer func() { - if errs := f.CleanupScaleTest(); len(errs) != 0 { + if errs := f.CleanupScaleTest(ctx); len(errs) != 0 { klog.Errorf("Failed to cleanup scale test: %v", errs) testSuccessFlag = false } }() } - err = f.PrepareScaleTest() + err = f.PrepareScaleTest(ctx) if err != nil { klog.Errorf("Failed to prepare scale test: %v", err) testSuccessFlag = false return } - if errs := f.RunScaleTest(); len(errs) != 0 { + if errs := f.RunScaleTest(ctx); len(errs) != 0 { klog.Errorf("Failed while running scale test: %v", errs) testSuccessFlag = false } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index fcaf4eebb71..0298d6d7b88 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -154,9 +154,9 @@ func affinityCheckFromPod(execPod *v1.Pod, serviceIP string, servicePort int) (t // affinityCheckFromTest returns interval, timeout and function pinging the service and // returning pinged hosts for pinging the service from the test itself. -func affinityCheckFromTest(cs clientset.Interface, serviceIP string, servicePort int) (time.Duration, time.Duration, func() []string) { +func affinityCheckFromTest(ctx context.Context, cs clientset.Interface, serviceIP string, servicePort int) (time.Duration, time.Duration, func() []string) { interval := 2 * time.Second - timeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs) + timeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs) params := &e2enetwork.HTTPPokeParams{Timeout: 2 * time.Second} getHosts := func() []string { @@ -178,13 +178,13 @@ func affinityCheckFromTest(cs clientset.Interface, serviceIP string, servicePort // number of same response observed in a row. If affinity is not expected, the // test will keep observe until different responses observed. The function will // return false only in case of unexpected errors. -func checkAffinity(cs clientset.Interface, execPod *v1.Pod, serviceIP string, servicePort int, shouldHold bool) bool { +func checkAffinity(ctx context.Context, cs clientset.Interface, execPod *v1.Pod, serviceIP string, servicePort int, shouldHold bool) bool { var interval, timeout time.Duration var getHosts func() []string if execPod != nil { interval, timeout, getHosts = affinityCheckFromPod(execPod, serviceIP, servicePort) } else { - interval, timeout, getHosts = affinityCheckFromTest(cs, serviceIP, servicePort) + interval, timeout, getHosts = affinityCheckFromTest(ctx, cs, serviceIP, servicePort) } var tracker affinityTracker @@ -264,11 +264,11 @@ func checkAffinityFailed(tracker affinityTracker, err string) { // StartServeHostnameService creates a replication controller that serves its // hostname and a service on top of it. 
-func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) { +func StartServeHostnameService(ctx context.Context, c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) { podNames := make([]string, replicas) name := svc.ObjectMeta.Name ginkgo.By("creating service " + name + " in namespace " + ns) - _, err := c.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := c.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { return podNames, "", err } @@ -287,7 +287,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string CreatedPods: &createdPods, MaxContainerFailures: &maxContainerFailures, } - err = e2erc.RunRC(config) + err = e2erc.RunRC(ctx, config) if err != nil { return podNames, "", err } @@ -301,7 +301,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string } sort.StringSlice(podNames).Sort() - service, err := c.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) + service, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return podNames, "", err } @@ -313,11 +313,11 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string } // StopServeHostnameService stops the given service. -func StopServeHostnameService(clientset clientset.Interface, ns, name string) error { - if err := e2erc.DeleteRCAndWaitForGC(clientset, ns, name); err != nil { +func StopServeHostnameService(ctx context.Context, clientset clientset.Interface, ns, name string) error { + if err := e2erc.DeleteRCAndWaitForGC(ctx, clientset, ns, name); err != nil { return err } - if err := clientset.CoreV1().Services(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { + if err := clientset.CoreV1().Services(ns).Delete(ctx, name, metav1.DeleteOptions{}); err != nil { return err } return nil @@ -327,15 +327,15 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er // host exec pod of host network type and from the exec pod of container network type. // Each pod in the service is expected to echo its name. These names are compared with the // given expectedPods list after a sort | uniq. -func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods []string, serviceIP string, servicePort int) error { +func verifyServeHostnameServiceUp(ctx context.Context, c clientset.Interface, ns string, expectedPods []string, serviceIP string, servicePort int) error { // to verify from host network - hostExecPod := launchHostExecPod(c, ns, "verify-service-up-host-exec-pod") + hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-up-host-exec-pod") // to verify from container's network - execPod := e2epod.CreateExecPodOrFail(c, ns, "verify-service-up-exec-pod-", nil) + execPod := e2epod.CreateExecPodOrFail(ctx, c, ns, "verify-service-up-exec-pod-", nil) defer func() { - e2epod.DeletePodOrFail(c, ns, hostExecPod.Name) - e2epod.DeletePodOrFail(c, ns, execPod.Name) + e2epod.DeletePodOrFail(ctx, c, ns, hostExecPod.Name) + e2epod.DeletePodOrFail(ctx, c, ns, execPod.Name) }() // verify service from pod @@ -397,11 +397,11 @@ func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods } // verifyServeHostnameServiceDown verifies that the given service isn't served. 
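StopServeHostnameService gains ctx as its first parameter while the earlier ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc) call in networking.go stays unchanged; this relies on Ginkgo v2's DeferCleanup injecting a cleanup context when the registered function's first parameter is a context. A minimal sketch of the same pattern, using a hypothetical deleteServiceCleanup helper:

	// Assumes: ginkgo "github.com/onsi/ginkgo/v2", clientset "k8s.io/client-go/kubernetes",
	// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
	deleteServiceCleanup := func(ctx context.Context, c clientset.Interface, ns, name string) error {
		// ctx is supplied by Ginkgo when the cleanup node runs, not at registration time.
		return c.CoreV1().Services(ns).Delete(ctx, name, metav1.DeleteOptions{})
	}
	// Only the non-context arguments are passed explicitly; a returned error fails the spec.
	ginkgo.DeferCleanup(deleteServiceCleanup, f.ClientSet, f.Namespace.Name, "serve-hostname")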
-func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP string, servicePort int) error { +func verifyServeHostnameServiceDown(ctx context.Context, c clientset.Interface, ns string, serviceIP string, servicePort int) error { // verify from host network - hostExecPod := launchHostExecPod(c, ns, "verify-service-down-host-exec-pod") + hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-down-host-exec-pod") defer func() { - e2epod.DeletePodOrFail(c, ns, hostExecPod.Name) + e2epod.DeletePodOrFail(ctx, c, ns, hostExecPod.Name) }() ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort)) @@ -681,10 +681,10 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url) } -func testHTTPHealthCheckNodePortFromTestContainer(config *e2enetwork.NetworkingTestConfig, host string, port int, timeout time.Duration, expectSucceed bool, threshold int) error { +func testHTTPHealthCheckNodePortFromTestContainer(ctx context.Context, config *e2enetwork.NetworkingTestConfig, host string, port int, timeout time.Duration, expectSucceed bool, threshold int) error { count := 0 pollFn := func() (bool, error) { - statusCode, err := config.GetHTTPCodeFromTestContainer( + statusCode, err := config.GetHTTPCodeFromTestContainer(ctx, "/healthz", host, port) @@ -728,9 +728,9 @@ func getServeHostnameService(name string) *v1.Service { } // waitForAPIServerUp waits for the kube-apiserver to be up. -func waitForAPIServerUp(c clientset.Interface) error { +func waitForAPIServerUp(ctx context.Context, c clientset.Interface) error { for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do(context.TODO()).Raw() + body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do(ctx).Raw() if err == nil && string(body) == "ok" { return nil } @@ -740,8 +740,8 @@ func waitForAPIServerUp(c clientset.Interface) error { // getEndpointNodesWithInternalIP returns a map of nodenames:internal-ip on which the // endpoints of the Service are running. -func getEndpointNodesWithInternalIP(jig *e2eservice.TestJig) (map[string]string, error) { - nodesWithIPs, err := jig.GetEndpointNodesWithIP(v1.NodeInternalIP) +func getEndpointNodesWithInternalIP(ctx context.Context, jig *e2eservice.TestJig) (map[string]string, error) { + nodesWithIPs, err := jig.GetEndpointNodesWithIP(ctx, v1.NodeInternalIP) if err != nil { return nil, err } @@ -773,7 +773,7 @@ var _ = common.SIGDescribe("Services", func() { Description: By default when a kubernetes cluster is running there MUST be a 'kubernetes' service running in the cluster. 
*/ framework.ConformanceIt("should provide secure master service ", func(ctx context.Context) { - _, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch the service object for the service named kubernetes") }) @@ -792,10 +792,10 @@ var _ = common.SIGDescribe("Services", func() { err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }) - svc, err := jig.CreateTCPServiceWithPort(nil, 80) + svc, err := jig.CreateTCPServiceWithPort(ctx, nil, 80) framework.ExpectNoError(err) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{}) names := map[string]bool{} ginkgo.DeferCleanup(func(ctx context.Context) { @@ -808,34 +808,34 @@ var _ = common.SIGDescribe("Services", func() { name1 := "pod1" name2 := "pod2" - createPodOrFail(f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 80}}, "netexec", "--http-port", "80") + createPodOrFail(ctx, f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 80}}, "netexec", "--http-port", "80") names[name1] = true - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name1: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{name1: {80}}) ginkgo.By("Checking if the Service forwards traffic to pod1") - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(svc, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) - createPodOrFail(f, ns, name2, jig.Labels, []v1.ContainerPort{{ContainerPort: 80}}, "netexec", "--http-port", "80") + createPodOrFail(ctx, f, ns, name2, jig.Labels, []v1.ContainerPort{{ContainerPort: 80}}, "netexec", "--http-port", "80") names[name2] = true - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name1: {80}, name2: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{name1: {80}, name2: {80}}) ginkgo.By("Checking if the Service forwards traffic to pod1 and pod2") - err = jig.CheckServiceReachability(svc, execPod) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) - e2epod.DeletePodOrFail(cs, ns, name1) + e2epod.DeletePodOrFail(ctx, cs, ns, name1) delete(names, name1) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name2: {80}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{name2: {80}}) ginkgo.By("Checking if the Service forwards traffic to pod2") - err = jig.CheckServiceReachability(svc, execPod) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) - e2epod.DeletePodOrFail(cs, ns, name2) + e2epod.DeletePodOrFail(ctx, cs, ns, name2) delete(names, name2) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{}) }) /* @@ -858,7 +858,7 @@ var _ = common.SIGDescribe("Services", func() { svc2port := "svc2" ginkgo.By("creating service " + serviceName + " in namespace " + ns) - svc, err := jig.CreateTCPService(func(service *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(service *v1.Service) { 
service.Spec.Ports = []v1.ServicePort{ { Name: "portname1", @@ -876,7 +876,7 @@ var _ = common.SIGDescribe("Services", func() { port1 := 100 port2 := 101 - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{}) names := map[string]bool{} ginkgo.DeferCleanup(func(ctx context.Context) { @@ -902,26 +902,26 @@ var _ = common.SIGDescribe("Services", func() { podname1 := "pod1" podname2 := "pod2" - createPodOrFail(f, ns, podname1, jig.Labels, containerPorts1, "netexec", "--http-port", strconv.Itoa(port1)) + createPodOrFail(ctx, f, ns, podname1, jig.Labels, containerPorts1, "netexec", "--http-port", strconv.Itoa(port1)) names[podname1] = true - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podname1: {port1}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}}) - createPodOrFail(f, ns, podname2, jig.Labels, containerPorts2, "netexec", "--http-port", strconv.Itoa(port2)) + createPodOrFail(ctx, f, ns, podname2, jig.Labels, containerPorts2, "netexec", "--http-port", strconv.Itoa(port2)) names[podname2] = true - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podname1: {port1}, podname2: {port2}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}, podname2: {port2}}) ginkgo.By("Checking if the Service forwards traffic to pods") - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(svc, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) - e2epod.DeletePodOrFail(cs, ns, podname1) + e2epod.DeletePodOrFail(ctx, cs, ns, podname1) delete(names, podname1) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podname2: {port2}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname2: {port2}}) - e2epod.DeletePodOrFail(cs, ns, podname2) + e2epod.DeletePodOrFail(ctx, cs, ns, podname2) delete(names, podname2) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{}) }) ginkgo.It("should be updated after adding or deleting ports ", func(ctx context.Context) { @@ -931,7 +931,7 @@ var _ = common.SIGDescribe("Services", func() { svc1port := "svc1" ginkgo.By("creating service " + serviceName + " in namespace " + ns) - svc, err := jig.CreateTCPService(func(service *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(service *v1.Service) { service.Spec.Ports = []v1.ServicePort{ { Name: "portname1", @@ -941,7 +941,7 @@ var _ = common.SIGDescribe("Services", func() { } }) framework.ExpectNoError(err) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{}) podname1 := "pod1" port1 := 100 @@ -951,17 +951,17 @@ var _ = common.SIGDescribe("Services", func() { ContainerPort: int32(port1), }, } - createPodOrFail(f, ns, podname1, jig.Labels, containerPorts1, "netexec", "--http-port", strconv.Itoa(port1)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podname1: {port1}}) + createPodOrFail(ctx, f, ns, podname1, jig.Labels, containerPorts1, "netexec", "--http-port", strconv.Itoa(port1)) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}}) ginkgo.By("Checking if the Service " + serviceName + " 
forwards traffic to " + podname1) - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(svc, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) ginkgo.By("Adding a new port to service " + serviceName) svc2port := "svc2" - svc, err = jig.UpdateService(func(s *v1.Service) { + svc, err = jig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Ports = []v1.ServicePort{ { Name: "portname1", @@ -986,15 +986,15 @@ var _ = common.SIGDescribe("Services", func() { ContainerPort: int32(port2), }, } - createPodOrFail(f, ns, podname2, jig.Labels, containerPorts2, "netexec", "--http-port", strconv.Itoa(port2)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podname1: {port1}, podname2: {port2}}) + createPodOrFail(ctx, f, ns, podname2, jig.Labels, containerPorts2, "netexec", "--http-port", strconv.Itoa(port2)) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}, podname2: {port2}}) ginkgo.By("Checking if the Service forwards traffic to " + podname1 + " and " + podname2) - err = jig.CheckServiceReachability(svc, execPod) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) ginkgo.By("Deleting a port from service " + serviceName) - svc, err = jig.UpdateService(func(s *v1.Service) { + svc, err = jig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Ports = []v1.ServicePort{ { Name: "portname1", @@ -1006,8 +1006,8 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) ginkgo.By("Checking if the Service forwards traffic to " + podname1 + " and not forwards to " + podname2) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podname1: {port1}}) - err = jig.CheckServiceReachability(svc, execPod) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}}) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) }) @@ -1017,7 +1017,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. 
- if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -1032,7 +1032,7 @@ var _ = common.SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) jig.ExternalIPs = true servicePort := 8080 - tcpService, err := jig.CreateTCPServiceWithPort(nil, int32(servicePort)) + tcpService, err := jig.CreateTCPServiceWithPort(ctx, nil, int32(servicePort)) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Cleaning up the sourceip test service") @@ -1043,7 +1043,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("sourceip-test cluster ip: %s", serviceIP) ginkgo.By("Picking 2 Nodes to test whether source IP is preserved or not") - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -1054,19 +1054,19 @@ var _ = common.SIGDescribe("Services", func() { serverPodName := "echo-sourceip" pod := e2epod.NewAgnhostPod(ns, serverPodName, nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort)) pod.Labels = jig.Labels - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Cleaning up the echo server pod") err := cs.CoreV1().Pods(ns).Delete(ctx, serverPodName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s on node", serverPodName) }) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}}) ginkgo.By("Creating pause pod deployment") - deployment := createPausePodDeployment(cs, "pause-pod", ns, nodeCounts) + deployment := createPausePodDeployment(ctx, cs, "pause-pod", ns, nodeCounts) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Deleting deployment") @@ -1076,11 +1076,11 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") - deployment, err = cs.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) + deployment, err = cs.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error in retrieving pause pod deployment") labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - pausePods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) + pausePods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector.String()}) framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments") framework.ExpectNotEqual(pausePods.Items[0].Spec.NodeName, pausePods.Items[1].Spec.NodeName) @@ -1102,7 +1102,7 @@ var _ = common.SIGDescribe("Services", 
func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) jig.ExternalIPs = true servicePort := 8080 - svc, err := jig.CreateTCPServiceWithPort(nil, int32(servicePort)) + svc, err := jig.CreateTCPServiceWithPort(ctx, nil, int32(servicePort)) framework.ExpectNoError(err) serviceIP := svc.Spec.ClusterIP framework.Logf("hairpin-test cluster ip: %s", serviceIP) @@ -1111,15 +1111,15 @@ var _ = common.SIGDescribe("Services", func() { serverPodName := "hairpin" podTemplate := e2epod.NewAgnhostPod(ns, serverPodName, nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort)) podTemplate.Labels = jig.Labels - pod, err := cs.CoreV1().Pods(ns).Create(context.TODO(), podTemplate, metav1.CreateOptions{}) + pod, err := cs.CoreV1().Pods(ns).Create(ctx, podTemplate, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) ginkgo.By("waiting for the service to expose an endpoint") - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}}) ginkgo.By("Checking if the pod can reach itself") - err = jig.CheckServiceReachability(svc, pod) + err = jig.CheckServiceReachability(ctx, svc, pod) framework.ExpectNoError(err) }) @@ -1132,30 +1132,30 @@ var _ = common.SIGDescribe("Services", func() { svc3 := "up-down-3" ginkgo.By("creating " + svc1 + " in namespace " + ns) - podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) + podNames1, svc1IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc1), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns) ginkgo.By("creating " + svc2 + " in namespace " + ns) - podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) + podNames2, svc2IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc2), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) ginkgo.By("verifying service " + svc1 + " is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort)) ginkgo.By("verifying service " + svc2 + " is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort)) // Stop service 1 and make sure it is gone. 
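The hunks above all apply the same signature change: test helpers such as StartServeHostnameService and verifyServeHostnameServiceUp now take the Ginkgo context as their first parameter and forward it to every client-go call. A minimal sketch of that convention, outside the patch (validatePodRunning and its arguments are hypothetical stand-ins, not framework code):

package e2eexample

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// validatePodRunning stands in for helpers like verifyServeHostnameServiceUp:
// the context comes first and is forwarded verbatim to the API call.
func validatePodRunning(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if pod.Status.Phase != v1.PodRunning {
		return fmt.Errorf("pod %s/%s is in phase %q, want Running", ns, name, pod.Status.Phase)
	}
	return nil
}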
ginkgo.By("stopping service " + svc1) - framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc1)) + framework.ExpectNoError(StopServeHostnameService(ctx, f.ClientSet, ns, svc1)) ginkgo.By("verifying service " + svc1 + " is not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svc1IP, servicePort)) ginkgo.By("verifying service " + svc2 + " is still up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort)) // Start another service and verify both are up. ginkgo.By("creating service " + svc3 + " in namespace " + ns) - podNames3, svc3IP, err := StartServeHostnameService(cs, getServeHostnameService(svc3), ns, numPods) + podNames3, svc3IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc3), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3, ns) if svc2IP == svc3IP { @@ -1163,10 +1163,10 @@ var _ = common.SIGDescribe("Services", func() { } ginkgo.By("verifying service " + svc2 + " is still up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort)) ginkgo.By("verifying service " + svc3 + " is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames3, svc3IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames3, svc3IP, servicePort)) }) ginkgo.It("should work after the service has been recreated", func(ctx context.Context) { @@ -1176,16 +1176,16 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating the service " + serviceName + " in namespace " + ns) ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, serviceName) - podNames, svcIP, _ := StartServeHostnameService(cs, getServeHostnameService(serviceName), ns, numPods) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames, svcIP, servicePort)) + podNames, svcIP, _ := StartServeHostnameService(ctx, cs, getServeHostnameService(serviceName), ns, numPods) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames, svcIP, servicePort)) ginkgo.By("deleting the service " + serviceName + " in namespace " + ns) - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) + err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for the service " + serviceName + " in namespace " + ns + " to disappear") if pollErr := wait.PollImmediate(framework.Poll, e2eservice.RespondingTimeout, func() (bool, error) { - _, err := cs.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{}) + _, err := cs.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("Service %s/%s is gone.", ns, serviceName) @@ -1200,14 +1200,14 @@ var _ = common.SIGDescribe("Services", func() { } ginkgo.By("recreating the service " + serviceName + " in namespace " + ns) - svc, err := cs.CoreV1().Services(ns).Create(context.TODO(), getServeHostnameService(serviceName), metav1.CreateOptions{}) + svc, err := cs.CoreV1().Services(ns).Create(ctx, getServeHostnameService(serviceName), 
metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames, svc.Spec.ClusterIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames, svc.Spec.ClusterIP, servicePort)) }) ginkgo.It("should work after restarting kube-proxy [Disruptive]", func(ctx context.Context) { kubeProxyLabelSet := map[string]string{clusterAddonLabelKey: kubeProxyLabelName} - e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(kubeProxyLabelName, cs, metav1.NamespaceSystem, kubeProxyLabelSet) + e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeProxyLabelName, cs, metav1.NamespaceSystem, kubeProxyLabelSet) // TODO: use the ServiceTestJig here ns := f.Namespace.Name @@ -1217,31 +1217,31 @@ var _ = common.SIGDescribe("Services", func() { svc2 := "restart-proxy-2" ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc1) - podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) + podNames1, svc1IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc1), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns) ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc2) - podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) + podNames2, svc2IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc2), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) } - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort)) - if err := restartComponent(cs, kubeProxyLabelName, metav1.NamespaceSystem, kubeProxyLabelSet); err != nil { + if err := restartComponent(ctx, cs, kubeProxyLabelName, metav1.NamespaceSystem, kubeProxyLabelSet); err != nil { framework.Failf("error restarting kube-proxy: %v", err) } - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort)) }) ginkgo.It("should work after restarting apiserver [Disruptive]", func(ctx context.Context) { if !framework.ProviderIs("gke") { - e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName}) + e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName}) } // TODO: use the ServiceTestJig here @@ -1252,32 +1252,32 @@ var _ = common.SIGDescribe("Services", func() { svc2 := "restart-apiserver-2" ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc1) - podNames1, svc1IP, err := 
StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) + podNames1, svc1IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc1), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort)) // Restart apiserver ginkgo.By("Restarting apiserver") - if err := restartApiserver(ns, cs); err != nil { + if err := restartApiserver(ctx, ns, cs); err != nil { framework.Failf("error restarting apiserver: %v", err) } ginkgo.By("Waiting for apiserver to come up by polling /healthz") - if err := waitForAPIServerUp(cs); err != nil { + if err := waitForAPIServerUp(ctx, cs); err != nil { framework.Failf("error while waiting for apiserver up: %v", err) } - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort)) // Create a new service and check if it's not reusing IP. ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc2) - podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) + podNames2, svc2IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc2), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) } - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort)) }) /* @@ -1294,17 +1294,17 @@ var _ = common.SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns) - nodePortService, err := jig.CreateTCPService(func(svc *v1.Service) { + nodePortService, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(9376)}, } }) framework.ExpectNoError(err) - err = jig.CreateServicePods(2) + err = jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(nodePortService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, nodePortService, execPod) framework.ExpectNoError(err) }) @@ -1325,7 +1325,7 @@ var _ = common.SIGDescribe("Services", func() { jig.ExternalIPs = true ginkgo.By("creating service " + serviceName + " with type=clusterIP in namespace " + ns) - clusterIPService, err := jig.CreateTCPService(func(svc *v1.Service) { + clusterIPService, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeClusterIP svc.Spec.ExternalIPs = []string{externalIP} svc.Spec.Ports = []v1.ServicePort{ @@ -1336,10 +1336,10 @@ var _ = common.SIGDescribe("Services", func() { 
e2eskipper.Skipf("Admission controller to deny services with external IPs is enabled - skip.") } framework.ExpectNoError(err) - err = jig.CreateServicePods(2) + err = jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(clusterIPService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, clusterIPService, execPod) framework.ExpectNoError(err) }) @@ -1357,7 +1357,7 @@ var _ = common.SIGDescribe("Services", func() { jig.ExternalIPs = true ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) - tcpService, err := jig.CreateTCPService(nil) + tcpService, err := jig.CreateTCPService(ctx, nil) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Cleaning up the updating NodePorts test service") @@ -1367,7 +1367,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Service Port TCP: %v", tcpService.Spec.Ports[0].Port) ginkgo.By("changing the TCP service to type=NodePort") - nodePortService, err := jig.UpdateService(func(s *v1.Service) { + nodePortService, err := jig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort s.Spec.Ports = []v1.ServicePort{ { @@ -1380,14 +1380,14 @@ var _ = common.SIGDescribe("Services", func() { }) framework.ExpectNoError(err) - err = jig.CreateServicePods(2) + err = jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(nodePortService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, nodePortService, execPod) framework.ExpectNoError(err) ginkgo.By("Updating NodePort service to listen TCP and UDP based requests over same Port") - nodePortService, err = jig.UpdateService(func(s *v1.Service) { + nodePortService, err = jig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort s.Spec.Ports = []v1.ServicePort{ { @@ -1405,7 +1405,7 @@ var _ = common.SIGDescribe("Services", func() { } }) framework.ExpectNoError(err) - err = jig.CheckServiceReachability(nodePortService, execPod) + err = jig.CheckServiceReachability(ctx, nodePortService, execPod) framework.ExpectNoError(err) nodePortCounts := len(nodePortService.Spec.Ports) framework.ExpectEqual(nodePortCounts, 2, "updated service should have two Ports but found %d Ports", nodePortCounts) @@ -1429,7 +1429,7 @@ var _ = common.SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) - _, err := jig.CreateExternalNameService(nil) + _, err := jig.CreateExternalNameService(ctx, nil) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Cleaning up the ExternalName to ClusterIP test service") @@ -1438,7 +1438,7 @@ var _ = common.SIGDescribe("Services", func() { }) ginkgo.By("changing the ExternalName service to type=ClusterIP") - clusterIPService, err := jig.UpdateService(func(s *v1.Service) { + clusterIPService, err := jig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.ExternalName = "" s.Spec.Ports = []v1.ServicePort{ @@ -1447,10 +1447,10 @@ var _ = common.SIGDescribe("Services", func() { }) framework.ExpectNoError(err) - err = 
jig.CreateServicePods(2) + err = jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(clusterIPService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, clusterIPService, execPod) framework.ExpectNoError(err) }) @@ -1468,7 +1468,7 @@ var _ = common.SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) - _, err := jig.CreateExternalNameService(nil) + _, err := jig.CreateExternalNameService(ctx, nil) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Cleaning up the ExternalName to NodePort test service") @@ -1477,7 +1477,7 @@ var _ = common.SIGDescribe("Services", func() { }) ginkgo.By("changing the ExternalName service to type=NodePort") - nodePortService, err := jig.UpdateService(func(s *v1.Service) { + nodePortService, err := jig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort s.Spec.ExternalName = "" s.Spec.Ports = []v1.ServicePort{ @@ -1485,11 +1485,11 @@ var _ = common.SIGDescribe("Services", func() { } }) framework.ExpectNoError(err) - err = jig.CreateServicePods(2) + err = jig.CreateServicePods(ctx, 2) framework.ExpectNoError(err) - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(nodePortService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, nodePortService, execPod) framework.ExpectNoError(err) }) @@ -1506,7 +1506,7 @@ var _ = common.SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns) - _, err := jig.CreateTCPService(nil) + _, err := jig.CreateTCPService(ctx, nil) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Cleaning up the ClusterIP to ExternalName test service") @@ -1516,11 +1516,11 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service") externalServiceName := "externalsvc" - externalServiceFQDN := createAndGetExternalServiceFQDN(cs, ns, externalServiceName) + externalServiceFQDN := createAndGetExternalServiceFQDN(ctx, cs, ns, externalServiceName) ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, externalServiceName) ginkgo.By("changing the ClusterIP service to type=ExternalName") - externalNameService, err := jig.UpdateService(func(s *v1.Service) { + externalNameService, err := jig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeExternalName s.Spec.ExternalName = externalServiceFQDN }) @@ -1528,8 +1528,8 @@ var _ = common.SIGDescribe("Services", func() { if externalNameService.Spec.ClusterIP != "" { framework.Failf("Spec.ClusterIP was not cleared") } - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(externalNameService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, externalNameService, execPod) framework.ExpectNoError(err) }) @@ -1546,7 +1546,7 @@ var _ = common.SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) 
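For context, a small self-contained sketch (assuming Ginkgo v2 and client-go; the service name "demo" and the registerSpecs wrapper are illustrative, not from this patch) of the two mechanisms these hunks depend on: the spec body receives a context.Context that Ginkgo cancels when the spec is aborted, and DeferCleanup callbacks can accept their own context, so neither needs context.TODO():

package e2eexample

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// registerSpecs is an illustrative wrapper, not part of the e2e framework.
func registerSpecs(cs kubernetes.Interface, ns string) {
	ginkgo.It("uses the Ginkgo context for all API calls", func(ctx context.Context) {
		// ctx is cancelled when the spec is interrupted or times out, so the
		// Get call returns promptly instead of blocking the whole suite.
		_, err := cs.CoreV1().Services(ns).Get(ctx, "demo", metav1.GetOptions{})
		ginkgo.GinkgoWriter.Printf("lookup of demo (illustrative name): %v\n", err)

		// Cleanup callbacks receive their own context, valid while cleanup runs.
		ginkgo.DeferCleanup(func(ctx context.Context) error {
			return cs.CoreV1().Services(ns).Delete(ctx, "demo", metav1.DeleteOptions{})
		})
	})
}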
ginkgo.By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns) - _, err := jig.CreateTCPService(func(svc *v1.Service) { + _, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort }) framework.ExpectNoError(err) @@ -1558,11 +1558,11 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service") externalServiceName := "externalsvc" - externalServiceFQDN := createAndGetExternalServiceFQDN(cs, ns, externalServiceName) + externalServiceFQDN := createAndGetExternalServiceFQDN(ctx, cs, ns, externalServiceName) ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, externalServiceName) ginkgo.By("changing the NodePort service to type=ExternalName") - externalNameService, err := jig.UpdateService(func(s *v1.Service) { + externalNameService, err := jig.UpdateService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeExternalName s.Spec.ExternalName = externalServiceFQDN }) @@ -1570,8 +1570,8 @@ var _ = common.SIGDescribe("Services", func() { if externalNameService.Spec.ClusterIP != "" { framework.Failf("Spec.ClusterIP was not cleared") } - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(externalNameService, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, externalNameService, execPod) framework.ExpectNoError(err) }) @@ -1672,7 +1672,7 @@ var _ = common.SIGDescribe("Services", func() { } } ginkgo.By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) - result, err := e2eservice.UpdateService(cs, ns, serviceName, func(s *v1.Service) { + result, err := e2eservice.UpdateService(ctx, cs, ns, serviceName, func(s *v1.Service) { s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) }) if err == nil { @@ -1736,7 +1736,7 @@ var _ = common.SIGDescribe("Services", func() { err = t.DeleteService(serviceName) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) - hostExec := launchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") + hostExec := launchHostExecPod(ctx, f.ClientSet, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! 
ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) var stdout string if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { @@ -1823,16 +1823,16 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) ginkgo.By("Verifying pods for RC " + t.Name) - framework.ExpectNoError(e2epod.VerifyPods(t.Client, t.Namespace, t.Name, false, 1)) + framework.ExpectNoError(e2epod.VerifyPods(ctx, t.Client, t.Namespace, t.Name, false, 1)) svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) ginkgo.By("Waiting for endpoints of Service with DNS name " + svcName) - execPod := e2epod.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil) + execPod := e2epod.CreateExecPodOrFail(ctx, f.ClientSet, f.Namespace.Name, "execpod-", nil) execPodName := execPod.Name cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/", svcName, port) var stdout string - if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediateWithContext(ctx, framework.Poll, e2eservice.KubeProxyLagTimeout, func(ctx context.Context) (bool, error) { var err error stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { @@ -1845,10 +1845,10 @@ var _ = common.SIGDescribe("Services", func() { } ginkgo.By("Scaling down replication controller to zero") - e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) + e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) ginkgo.By("Update service to not tolerate unready services") - _, err = e2eservice.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { + _, err = e2eservice.UpdateService(ctx, f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.Spec.PublishNotReadyAddresses = false }) framework.ExpectNoError(err) @@ -1868,7 +1868,7 @@ var _ = common.SIGDescribe("Services", func() { } ginkgo.By("Update service to tolerate unready services again") - _, err = e2eservice.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { + _, err = e2eservice.UpdateService(ctx, f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.Spec.PublishNotReadyAddresses = true }) framework.ExpectNoError(err) @@ -1891,13 +1891,13 @@ var _ = common.SIGDescribe("Services", func() { label := labels.SelectorFromSet(labels.Set(t.Labels)) options := metav1.ListOptions{LabelSelector: label.String()} podClient := t.Client.CoreV1().Pods(f.Namespace.Name) - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) if err != nil { framework.Logf("warning: error retrieving pods: %s", err) } else { for _, pod := range pods.Items { var gracePeriodSeconds int64 = 0 - err := podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) + err := podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) if err != nil { framework.Logf("warning: error force deleting pod '%s': %s", pod.Name, err) } @@ -1906,7 +1906,7 @@ var _ = common.SIGDescribe("Services", func() { }) ginkgo.It("should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true", func(ctx context.Context) { - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) 
framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -1921,7 +1921,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a NodePort TCP service " + serviceName + " that PublishNotReadyAddresses on" + ns) jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, } @@ -1977,21 +1977,21 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(gracePeriod) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout) if err != nil { framework.Failf("error waiting for pod %s to be ready %v", webserverPod0.Name, err) } - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) ginkgo.By("Creating 1 pause pods that will try to connect to the webservers") pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout) if err != nil { framework.Failf("error waiting for pod %s to be ready %v", pausePod1.Name, err) } @@ -1999,11 +1999,11 @@ var _ = common.SIGDescribe("Services", func() { // webserver should continue to serve traffic through the Service after delete since: // - it has a 600s termination grace period // - it is unready but PublishNotReadyAddresses is true - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) // Wait until the pod becomes unready - err = e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, webserverPod0.Name, "pod not ready", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { + err = e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, webserverPod0.Name, "pod not ready", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { return !podutil.IsPodReady(pod), nil }) if err != nil { @@ -2025,7 +2025,7 @@ var _ = common.SIGDescribe("Services", func() { }) ginkgo.It("should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false", func(ctx context.Context) { - nodes, err := 
e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -2040,7 +2040,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a NodePort TCP service " + serviceName + " that PublishNotReadyAddresses on" + ns) jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, } @@ -2096,21 +2096,21 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(gracePeriod) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout) if err != nil { framework.Failf("error waiting for pod %s to be ready %v", webserverPod0.Name, err) } - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) ginkgo.By("Creating 1 pause pods that will try to connect to the webservers") pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout) if err != nil { framework.Failf("error waiting for pod %s to be ready %v", pausePod1.Name, err) } @@ -2118,11 +2118,11 @@ var _ = common.SIGDescribe("Services", func() { // webserver should stop to serve traffic through the Service after delete since: // - it has a 600s termination grace period // - it is unready but PublishNotReadyAddresses is false - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) // Wait until the pod becomes unready - err = e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, webserverPod0.Name, "pod not ready", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { + err = e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, webserverPod0.Name, "pod not ready", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { return !podutil.IsPodReady(pod), nil }) if err != nil { @@ -2175,13 +2175,13 @@ var _ = common.SIGDescribe("Services", func() { framework.ConformanceIt("should have session affinity work for service 
with type clusterIP [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-clusterip") svc.Spec.Type = v1.ServiceTypeClusterIP - execAffinityTestForNonLBService(f, cs, svc) + execAffinityTestForNonLBService(ctx, f, cs, svc) }) ginkgo.It("should have session affinity timeout work for service with type clusterIP [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-clusterip-timeout") svc.Spec.Type = v1.ServiceTypeClusterIP - execAffinityTestForSessionAffinityTimeout(f, cs, svc) + execAffinityTestForSessionAffinityTimeout(ctx, f, cs, svc) }) /* @@ -2197,7 +2197,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ConformanceIt("should be able to switch session affinity for service with type clusterIP [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-clusterip-transition") svc.Spec.Type = v1.ServiceTypeClusterIP - execAffinityTestForNonLBServiceWithTransition(f, cs, svc) + execAffinityTestForNonLBServiceWithTransition(ctx, f, cs, svc) }) /* @@ -2212,13 +2212,13 @@ var _ = common.SIGDescribe("Services", func() { framework.ConformanceIt("should have session affinity work for NodePort service [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-nodeport") svc.Spec.Type = v1.ServiceTypeNodePort - execAffinityTestForNonLBService(f, cs, svc) + execAffinityTestForNonLBService(ctx, f, cs, svc) }) ginkgo.It("should have session affinity timeout work for NodePort service [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-nodeport-timeout") svc.Spec.Type = v1.ServiceTypeNodePort - execAffinityTestForSessionAffinityTimeout(f, cs, svc) + execAffinityTestForSessionAffinityTimeout(ctx, f, cs, svc) }) /* @@ -2234,7 +2234,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ConformanceIt("should be able to switch session affinity for NodePort service [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-nodeport-transition") svc.Spec.Type = v1.ServiceTypeNodePort - execAffinityTestForNonLBServiceWithTransition(f, cs, svc) + execAffinityTestForNonLBServiceWithTransition(ctx, f, cs, svc) }) ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func(ctx context.Context) { @@ -2250,42 +2250,42 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating service-disabled in namespace " + ns) svcDisabled := getServeHostnameService("service-proxy-disabled") svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels - _, svcDisabledIP, err := StartServeHostnameService(cs, svcDisabled, ns, numPods) + _, svcDisabledIP, err := StartServeHostnameService(ctx, cs, svcDisabled, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) ginkgo.By("creating service in namespace " + ns) svcToggled := getServeHostnameService("service-proxy-toggled") - podToggledNames, svcToggledIP, err := StartServeHostnameService(cs, svcToggled, ns, numPods) + podToggledNames, svcToggledIP, err := StartServeHostnameService(ctx, cs, svcToggled, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) jig := e2eservice.NewTestJig(cs, ns, svcToggled.ObjectMeta.Name) ginkgo.By("verifying service is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podToggledNames, svcToggledIP, servicePort)) + 
framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podToggledNames, svcToggledIP, servicePort)) ginkgo.By("verifying service-disabled is not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcDisabledIP, servicePort)) ginkgo.By("adding service-proxy-name label") - _, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.ObjectMeta.Labels = serviceProxyNameLabels }) framework.ExpectNoError(err) ginkgo.By("verifying service is not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcToggledIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcToggledIP, servicePort)) ginkgo.By("removing service-proxy-name annotation") - _, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.ObjectMeta.Labels = nil }) framework.ExpectNoError(err) ginkgo.By("verifying service is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podToggledNames, svcToggledIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podToggledNames, svcToggledIP, servicePort)) ginkgo.By("verifying service-disabled is still not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcDisabledIP, servicePort)) }) ginkgo.It("should implement service.kubernetes.io/headless", func(ctx context.Context) { @@ -2302,61 +2302,61 @@ var _ = common.SIGDescribe("Services", func() { svcHeadless := getServeHostnameService("service-headless") svcHeadless.ObjectMeta.Labels = serviceHeadlessLabels // This should be improved, as we do not want a Headlesss Service to contain an IP... 
- _, svcHeadlessIP, err := StartServeHostnameService(cs, svcHeadless, ns, numPods) + _, svcHeadlessIP, err := StartServeHostnameService(ctx, cs, svcHeadless, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with headless service: %s in the namespace: %s", svcHeadlessIP, ns) ginkgo.By("creating service in namespace " + ns) svcHeadlessToggled := getServeHostnameService("service-headless-toggled") - podHeadlessToggledNames, svcHeadlessToggledIP, err := StartServeHostnameService(cs, svcHeadlessToggled, ns, numPods) + podHeadlessToggledNames, svcHeadlessToggledIP, err := StartServeHostnameService(ctx, cs, svcHeadlessToggled, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcHeadlessToggledIP, ns) jig := e2eservice.NewTestJig(cs, ns, svcHeadlessToggled.ObjectMeta.Name) ginkgo.By("verifying service is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort)) ginkgo.By("verifying service-headless is not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcHeadlessIP, servicePort)) ginkgo.By("adding service.kubernetes.io/headless label") - _, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.ObjectMeta.Labels = serviceHeadlessLabels }) framework.ExpectNoError(err) ginkgo.By("verifying service is not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessToggledIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcHeadlessToggledIP, servicePort)) ginkgo.By("removing service.kubernetes.io/headless annotation") - _, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.ObjectMeta.Labels = nil }) framework.ExpectNoError(err) ginkgo.By("verifying service is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort)) ginkgo.By("verifying service-headless is still not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcHeadlessIP, servicePort)) }) ginkgo.It("should be rejected when no endpoints exist", func(ctx context.Context) { namespace := f.Namespace.Name serviceName := "no-pods" jig := e2eservice.NewTestJig(cs, namespace, serviceName) - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, e2eservice.MaxNodesForEndpointsTests) framework.ExpectNoError(err) port := 80 ginkgo.By("creating a service with no endpoints") - _, err = jig.CreateTCPServiceWithPort(nil, int32(port)) + _, err = jig.CreateTCPServiceWithPort(ctx, nil, int32(port)) framework.ExpectNoError(err) nodeName := nodes.Items[0].Name podName := "execpod-noendpoints" ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName)) - execPod := e2epod.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { + 
execPod := e2epod.CreateExecPodOrFail(ctx, f.ClientSet, namespace, podName, func(pod *v1.Pod) { nodeSelection := e2epod.NodeSelection{Name: nodeName} e2epod.SetNodeSelection(&pod.Spec, nodeSelection) }) @@ -2389,14 +2389,14 @@ var _ = common.SIGDescribe("Services", func() { namespace := f.Namespace.Name serviceName := "evicted-pods" jig := e2eservice.NewTestJig(cs, namespace, serviceName) - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, e2eservice.MaxNodesForEndpointsTests) framework.ExpectNoError(err) nodeName := nodes.Items[0].Name port := 80 ginkgo.By("creating a service with no endpoints") - _, err = jig.CreateTCPServiceWithPort(func(s *v1.Service) { + _, err = jig.CreateTCPServiceWithPort(ctx, func(s *v1.Service) { // set publish not ready addresses to cover edge cases too s.Spec.PublishNotReadyAddresses = true }, int32(port)) @@ -2410,21 +2410,21 @@ var _ = common.SIGDescribe("Services", func() { evictedPod.Spec.Containers[0].Resources = v1.ResourceRequirements{ Limits: v1.ResourceList{"ephemeral-storage": resource.MustParse("5Mi")}, } - e2epod.NewPodClient(f).Create(evictedPod) - err = e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, evictedPod.Name, "Evicted", f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, evictedPod) + err = e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, evictedPod.Name, "Evicted", f.Namespace.Name) if err != nil { framework.Failf("error waiting for pod to be evicted: %v", err) } podName := "execpod-evictedpods" ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName)) - execPod := e2epod.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { + execPod := e2epod.CreateExecPodOrFail(ctx, f.ClientSet, namespace, podName, func(pod *v1.Pod) { nodeSelection := e2epod.NodeSelection{Name: nodeName} e2epod.SetNodeSelection(&pod.Spec, nodeSelection) }) if epErr := wait.PollImmediate(framework.Poll, e2eservice.ServiceEndpointsTimeout, func() (bool, error) { - endpoints, err := cs.CoreV1().Endpoints(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) + endpoints, err := cs.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { framework.Logf("error fetching '%s/%s' Endpoints: %s", namespace, serviceName, err.Error()) return false, err @@ -2433,7 +2433,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("expected '%s/%s' Endpoints to be empty, got: %v", namespace, serviceName, endpoints.Subsets) return false, nil } - epsList, err := cs.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, serviceName)}) + epsList, err := cs.DiscoveryV1().EndpointSlices(namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, serviceName)}) if err != nil { framework.Logf("error fetching '%s/%s' EndpointSlices: %s", namespace, serviceName, err.Error()) return false, err @@ -2482,7 +2482,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. 
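Earlier hunks in this file replace wait.PollImmediate with wait.PollImmediateWithContext so that polling stops as soon as the spec context is cancelled. A minimal sketch of that pattern (waitForServiceGone, the 2-second interval and 30-second timeout are illustrative assumptions, not values taken from the patch):

package e2eexample

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForServiceGone polls until the Service has been deleted, giving up early
// if ctx is cancelled (for example because the Ginkgo spec was aborted).
func waitForServiceGone(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
		_, err := cs.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // the Service is gone, stop polling
		}
		// err == nil: the Service still exists, keep polling.
		// Any other error aborts the poll and is returned to the caller.
		return false, err
	})
}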
- if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -2490,7 +2490,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err) } - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -2506,7 +2506,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns) local := v1.ServiceInternalTrafficPolicyLocal jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, } @@ -2519,26 +2519,26 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Labels = jig.Labels e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) ginkgo.By("Creating 2 pause pods that will try to connect to the webservers") pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{}) + pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout serviceAddress := 
net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort)) @@ -2560,7 +2560,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. - if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -2568,7 +2568,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err) } - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -2584,7 +2584,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns) local := v1.ServiceInternalTrafficPolicyLocal jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 8000, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(8000)}, } @@ -2597,28 +2597,28 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Labels = jig.Labels e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) ginkgo.By("Creating 2 pause pods that will try to connect to the webservers") pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) pausePod0.Spec.HostNetwork = true e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{}) + pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) pausePod1.Spec.HostNetwork = true e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, 
pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort)) @@ -2640,7 +2640,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. - if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -2648,7 +2648,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err) } - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -2667,7 +2667,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns) local := v1.ServiceInternalTrafficPolicyLocal jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(endpointPort)}, } @@ -2681,26 +2681,26 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Spec.HostNetwork = true e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {endpointPort}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {endpointPort}}) ginkgo.By("Creating 2 pause pods that will try to connect to the webserver") pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{}) + pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - 
pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort)) @@ -2720,17 +2720,17 @@ var _ = common.SIGDescribe("Services", func() { pausePod2.Spec.HostNetwork = true e2epod.SetNodeSelection(&pausePod2.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod2, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod2, metav1.CreateOptions{}) + pausePod2, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod2, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod2.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod2.Name, f.Namespace.Name, framework.PodStartTimeout)) pausePod3 := e2epod.NewAgnhostPod(ns, "pause-pod-3", nil, nil, nil) pausePod3.Spec.HostNetwork = true e2epod.SetNodeSelection(&pausePod3.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod3, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod3, metav1.CreateOptions{}) + pausePod3, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod3, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod3.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod3.Name, f.Namespace.Name, framework.PodStartTimeout)) // assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout for i := 0; i < 5; i++ { @@ -2751,7 +2751,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. 
- if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -2759,7 +2759,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err) } - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -2773,7 +2773,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns) jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, } @@ -2788,17 +2788,17 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{}) + pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) nodeIPs := e2enode.GetAddresses(&node0, v1.NodeInternalIP) healthCheckNodePortAddr := net.JoinHostPort(nodeIPs[0], strconv.Itoa(int(svc.Spec.HealthCheckNodePort))) @@ -2820,7 +2820,7 @@ var _ = common.SIGDescribe("Services", func() { // webserver should continue to serve traffic through the Service after deletion, even though the health check node port should return 503 ginkgo.By("Terminating the webserver pod") - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) // validate that the health check node port from kube-proxy returns 503 when there are no ready endpoints @@ -2850,7 +2850,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. 
// So we check the kube-proxy mode and skip this test if that's the case. - if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -2858,7 +2858,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err) } - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -2873,7 +2873,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns) jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, } @@ -2886,30 +2886,30 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) ginkgo.By("Creating 2 pause pods that will try to connect to the webservers") pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{}) + pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // webserver should continue to serve traffic through the Service after delete since: // - it has a 600s 
termination grace period // - it is the only ready endpoint - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) // assert 5 times that both the local and remote pod can connect to the Service while all endpoints are terminating @@ -2933,7 +2933,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. - if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -2941,7 +2941,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err) } - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -2957,7 +2957,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns) jig := e2eservice.NewTestJig(cs, ns, serviceName) local := v1.ServiceInternalTrafficPolicyLocal - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, } @@ -2971,30 +2971,30 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) ginkgo.By("Creating 2 pause pods that will try to connect to the webservers") pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{}) + pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod1, err = 
cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // webserver should continue to serve traffic through the Service after delete since: // - it has a 600s termination grace period // - it is the only ready endpoint - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) // assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout @@ -3021,7 +3021,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. - if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -3029,7 +3029,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err) } - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -3044,7 +3044,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns) jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, } @@ -3058,30 +3058,30 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) ginkgo.By("Creating 2 pause pods that will try to connect to the webservers") pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{}) + pausePod0, err = 
cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // webserver should continue to serve traffic through the Service after delete since: // - it has a 600s termination grace period // - it is the only ready endpoint - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) // assert 5 times that both the local and remote pod can connect to the Service NodePort while all endpoints are terminating @@ -3106,7 +3106,7 @@ var _ = common.SIGDescribe("Services", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. - if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "userspace" { e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode") } @@ -3114,7 +3114,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err) } - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) if nodeCounts < 2 { @@ -3129,7 +3129,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns) jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, } @@ -3144,30 +3144,30 @@ var _ = common.SIGDescribe("Services", func() { webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{}) + _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, 
f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}}) ginkgo.By("Creating 2 pause pods that will try to connect to the webservers") pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil) e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name}) - pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{}) + pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout)) pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil) e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name}) - pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{}) + pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // webserver should continue to serve traffic through the Service after delete since: // - it has a 600s termination grace period // - it is the only ready endpoint - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) // assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout @@ -3202,7 +3202,7 @@ var _ = common.SIGDescribe("Services", func() { */ framework.ConformanceIt("should find a service from listing all namespaces", func(ctx context.Context) { ginkgo.By("fetching services") - svcs, _ := f.ClientSet.CoreV1().Services("").List(context.TODO(), metav1.ListOptions{}) + svcs, _ := f.ClientSet.CoreV1().Services("").List(ctx, metav1.ListOptions{}) foundSvc := false for _, svc := range svcs.Items { @@ -3249,19 +3249,19 @@ var _ = common.SIGDescribe("Services", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = "test-endpoint-static=true" - return f.ClientSet.CoreV1().Endpoints(testNamespaceName).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Endpoints(testNamespaceName).Watch(ctx, options) }, } - endpointsList, err := f.ClientSet.CoreV1().Endpoints("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-endpoint-static=true"}) + endpointsList, err := f.ClientSet.CoreV1().Endpoints("").List(ctx, metav1.ListOptions{LabelSelector: "test-endpoint-static=true"}) framework.ExpectNoError(err, "failed to list Endpoints") ginkgo.By("creating an Endpoint") - _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Create(context.TODO(), &testEndpoints, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Create(ctx, &testEndpoints, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Endpoint") 
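Reader's aid, not part of the patch: a minimal, self-contained sketch of the conversion pattern these hunks apply. The spec body accepts the Ginkgo-provided ctx and passes it to client-go calls in place of context.TODO(), so the calls return as soon as the spec is aborted. The `cs` and `ns` variables here are placeholders for what the e2e framework normally supplies (f.ClientSet and f.Namespace.Name); the spec title is illustrative.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

var (
	cs kubernetes.Interface // placeholder: normally f.ClientSet
	ns string               // placeholder: normally f.Namespace.Name
)

var _ = ginkgo.It("uses the per-spec context for API calls", func(ctx context.Context) {
	// ctx is cancelled when the spec is interrupted or times out, so this
	// List returns promptly instead of outliving the test the way a call
	// made with context.TODO() would.
	pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err, "failed to list pods in %s", ns)
	framework.Logf("found %d pods in namespace %s", len(pods.Items), ns)
})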
ginkgo.By("waiting for available Endpoint") - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Added: if endpoints, ok := event.Object.(*v1.Endpoints); ok { @@ -3277,7 +3277,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to see %v event", watch.Added) ginkgo.By("listing all Endpoints") - endpointsList, err = f.ClientSet.CoreV1().Endpoints("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-endpoint-static=true"}) + endpointsList, err = f.ClientSet.CoreV1().Endpoints("").List(ctx, metav1.ListOptions{LabelSelector: "test-endpoint-static=true"}) framework.ExpectNoError(err, "failed to list Endpoints") eventFound := false var foundEndpoint v1.Endpoints @@ -3294,12 +3294,12 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By("updating the Endpoint") foundEndpoint.ObjectMeta.Labels["test-service"] = "updated" - _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Update(context.TODO(), &foundEndpoint, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Update(ctx, &foundEndpoint, metav1.UpdateOptions{}) framework.ExpectNoError(err, "failed to update Endpoint with new label") - ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Modified: if endpoints, ok := event.Object.(*v1.Endpoints); ok { @@ -3315,7 +3315,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to see %v event", watch.Modified) ginkgo.By("fetching the Endpoint") - endpoints, err := f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(context.TODO(), testEndpointName, metav1.GetOptions{}) + endpoints, err := f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(ctx, testEndpointName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch Endpoint") framework.ExpectEqual(foundEndpoint.ObjectMeta.Labels["test-service"], "updated", "failed to update Endpoint %v in namespace %v label not updated", testEndpointName, testNamespaceName) @@ -3343,11 +3343,11 @@ var _ = common.SIGDescribe("Services", func() { }) framework.ExpectNoError(err, "failed to marshal JSON for WatchEvent patch") ginkgo.By("patching the Endpoint") - _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Patch(context.TODO(), testEndpointName, types.StrategicMergePatchType, []byte(endpointPatch), metav1.PatchOptions{}) + _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Patch(ctx, testEndpointName, types.StrategicMergePatchType, []byte(endpointPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch Endpoint") - ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, endpoints.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, 
endpoints.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Modified: if endpoints, ok := event.Object.(*v1.Endpoints); ok { @@ -3363,7 +3363,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to see %v event", watch.Modified) ginkgo.By("fetching the Endpoint") - endpoints, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(context.TODO(), testEndpointName, metav1.GetOptions{}) + endpoints, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(ctx, testEndpointName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch Endpoint") framework.ExpectEqual(endpoints.ObjectMeta.Labels["test-service"], "patched", "failed to patch Endpoint with Label") endpointSubsetOne := endpoints.Subsets[0] @@ -3374,13 +3374,13 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectEqual(endpointSubsetOnePorts.Port, int32(8080), "failed to patch Endpoint") ginkgo.By("deleting the Endpoint by Collection") - err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-endpoint-static=true"}) + err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-endpoint-static=true"}) framework.ExpectNoError(err, "failed to delete Endpoint by Collection") ginkgo.By("waiting for Endpoint deletion") - ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() - _, err = watchtools.Until(ctx, endpoints.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, endpoints.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: if endpoints, ok := event.Object.(*v1.Endpoints); ok { @@ -3396,7 +3396,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to see %v event", watch.Deleted) ginkgo.By("fetching the Endpoint") - _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(context.TODO(), testEndpointName, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(ctx, testEndpointName, metav1.GetOptions{}) framework.ExpectError(err, "should not be able to fetch Endpoint") }) @@ -3422,11 +3422,11 @@ var _ = common.SIGDescribe("Services", func() { w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = testSvcLabelsFlat - return cs.CoreV1().Services(ns).Watch(context.TODO(), options) + return cs.CoreV1().Services(ns).Watch(ctx, options) }, } - svcList, err := cs.CoreV1().Services("").List(context.TODO(), metav1.ListOptions{LabelSelector: testSvcLabelsFlat}) + svcList, err := cs.CoreV1().Services("").List(ctx, metav1.ListOptions{LabelSelector: testSvcLabelsFlat}) framework.ExpectNoError(err, "failed to list Services") ginkgo.By("creating a Service") @@ -3445,13 +3445,13 @@ var _ = common.SIGDescribe("Services", func() { }}, }, } - _, err = cs.CoreV1().Services(ns).Create(context.TODO(), &testService, metav1.CreateOptions{}) + _, err = cs.CoreV1().Services(ns).Create(ctx, &testService, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Service") ginkgo.By("watching for the Service to be added") - ctx, cancel := context.WithTimeout(ctx, svcReadyTimeout) + ctxUntil, cancel := context.WithTimeout(ctx, svcReadyTimeout) defer 
cancel() - _, err = watchtools.Until(ctx, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { if svc, ok := event.Object.(*v1.Service); ok { found := svc.ObjectMeta.Name == testService.ObjectMeta.Name && svc.ObjectMeta.Namespace == ns && @@ -3470,7 +3470,7 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Service %s created", testSvcName) ginkgo.By("Getting /status") - svcStatusUnstructured, err := f.DynamicClient.Resource(svcResource).Namespace(ns).Get(context.TODO(), testSvcName, metav1.GetOptions{}, "status") + svcStatusUnstructured, err := f.DynamicClient.Resource(svcResource).Namespace(ns).Get(ctx, testSvcName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "Failed to fetch ServiceStatus of Service %s in namespace %s", testSvcName, ns) svcStatusBytes, err := json.Marshal(svcStatusUnstructured) framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err) @@ -3486,16 +3486,16 @@ var _ = common.SIGDescribe("Services", func() { } lbStatusJSON, err := json.Marshal(lbStatus) framework.ExpectNoError(err, "Failed to marshal JSON. %v", err) - _, err = svcClient.Patch(context.TODO(), testSvcName, types.MergePatchType, + _, err = svcClient.Patch(ctx, testSvcName, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"loadBalancer":`+string(lbStatusJSON)+`}}`), metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Could not patch service status", err) ginkgo.By("watching for the Service to be patched") - ctx, cancel = context.WithTimeout(context.Background(), svcReadyTimeout) + ctxUntil, cancel = context.WithTimeout(ctx, svcReadyTimeout) defer cancel() - _, err = watchtools.Until(ctx, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { if svc, ok := event.Object.(*v1.Service); ok { found := svc.ObjectMeta.Name == testService.ObjectMeta.Name && svc.ObjectMeta.Namespace == ns && @@ -3517,7 +3517,7 @@ var _ = common.SIGDescribe("Services", func() { var statusToUpdate, updatedStatus *v1.Service err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - statusToUpdate, err = svcClient.Get(context.TODO(), testSvcName, metav1.GetOptions{}) + statusToUpdate, err = svcClient.Get(ctx, testSvcName, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to retrieve service %s", testSvcName) statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, metav1.Condition{ @@ -3527,16 +3527,16 @@ var _ = common.SIGDescribe("Services", func() { Message: "Set from e2e test", }) - updatedStatus, err = svcClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) + updatedStatus, err = svcClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) return err }) framework.ExpectNoError(err, "\n\n Failed to UpdateStatus. 
%v\n\n", err) framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) ginkgo.By("watching for the Service to be updated") - ctx, cancel = context.WithTimeout(context.Background(), svcReadyTimeout) + ctxUntil, cancel = context.WithTimeout(ctx, svcReadyTimeout) defer cancel() - _, err = watchtools.Until(ctx, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { if svc, ok := event.Object.(*v1.Service); ok { found := svc.ObjectMeta.Name == testService.ObjectMeta.Name && svc.ObjectMeta.Namespace == ns && @@ -3572,13 +3572,13 @@ var _ = common.SIGDescribe("Services", func() { }, }) - _, err = svcClient.Patch(context.TODO(), testSvcName, types.StrategicMergePatchType, []byte(servicePatchPayload), metav1.PatchOptions{}) + _, err = svcClient.Patch(ctx, testSvcName, types.StrategicMergePatchType, []byte(servicePatchPayload), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch service. %v", err) ginkgo.By("watching for the Service to be patched") - ctx, cancel = context.WithTimeout(context.Background(), svcReadyTimeout) + ctxUntil, cancel = context.WithTimeout(ctx, svcReadyTimeout) defer cancel() - _, err = watchtools.Until(ctx, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { if svc, ok := event.Object.(*v1.Service); ok { found := svc.ObjectMeta.Name == testService.ObjectMeta.Name && svc.ObjectMeta.Namespace == ns && @@ -3597,13 +3597,13 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Service %s patched", testSvcName) ginkgo.By("deleting the service") - err = cs.CoreV1().Services(ns).Delete(context.TODO(), testSvcName, metav1.DeleteOptions{}) + err = cs.CoreV1().Services(ns).Delete(ctx, testSvcName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete the Service. %v", err) ginkgo.By("watching for the Service to be deleted") - ctx, cancel = context.WithTimeout(context.Background(), svcReadyTimeout) + ctxUntil, cancel = context.WithTimeout(ctx, svcReadyTimeout) defer cancel() - _, err = watchtools.Until(ctx, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { + _, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: if svc, ok := event.Object.(*v1.Service); ok { @@ -3688,21 +3688,21 @@ var _ = common.SIGDescribe("Services", func() { }}, }, } - _, err := svcClient.Create(context.TODO(), &svc, metav1.CreateOptions{}) + _, err := svcClient.Create(ctx, &svc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Service") }() } - svcList, err := cs.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err := cs.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list Services") framework.ExpectEqual(len(svcList.Items), 3, "Required count of services out of sync") ginkgo.By("deleting service collection") - err = svcDynamicClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: deleteLabel}) + err = svcDynamicClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: deleteLabel}) framework.ExpectNoError(err, "failed to delete service collection. 
%v", err) - svcList, err = cs.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + svcList, err = cs.CoreV1().Services(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list Services") framework.ExpectEqual(len(svcList.Items), 1, "Required count of services out of sync") @@ -3770,14 +3770,14 @@ var _ = common.SIGDescribe("Services", func() { podname1 := "pod1" - createPodOrFail(f, ns, podname1, jig.Labels, containerPorts, "netexec", "--http-port", strconv.Itoa(containerPort), "--udp-port", strconv.Itoa(containerPort)) + createPodOrFail(ctx, f, ns, podname1, jig.Labels, containerPorts, "netexec", "--http-port", strconv.Itoa(containerPort), "--udp-port", strconv.Itoa(containerPort)) validateEndpointsPortsWithProtocolsOrFail(cs, ns, serviceName, fullPortsByPodName{podname1: containerPorts}) ginkgo.By("Checking if the Service forwards traffic to pods") - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil) - err = jig.CheckServiceReachability(svc, execPod) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) - e2epod.DeletePodOrFail(cs, ns, podname1) + e2epod.DeletePodOrFail(ctx, cs, ns, podname1) }) // These is [Serial] because it can't run at the same time as the @@ -3788,30 +3788,30 @@ var _ = common.SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("getting the state of the sctp module on nodes") - nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2) framework.ExpectNoError(err) - sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes) + sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes) ginkgo.By("creating service " + serviceName + " in namespace " + ns) - _, err = jig.CreateSCTPServiceWithPort(nil, 5060) + _, err = jig.CreateSCTPServiceWithPort(ctx, nil, 5060) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }) - err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout) + err = e2enetwork.WaitForService(ctx, f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout) framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err)) ginkgo.By("validating endpoints do not exist yet") - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{}) ginkgo.By("creating a pod for the service") names := map[string]bool{} name1 := "pod1" - createPodOrFail(f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}}) + createPodOrFail(ctx, f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}}) names[name1] = true ginkgo.DeferCleanup(func(ctx context.Context) { for name := range names { @@ -3821,16 +3821,16 @@ var _ = common.SIGDescribe("Services", func() { }) ginkgo.By("validating endpoints exists") - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name1: {5060}}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{name1: {5060}}) ginkgo.By("deleting the pod") - e2epod.DeletePodOrFail(cs, ns, name1) + 
e2epod.DeletePodOrFail(ctx, cs, ns, name1) delete(names, name1) ginkgo.By("validating endpoints do not exist anymore") - validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) + validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{}) ginkgo.By("validating sctp module is still not loaded") - sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes) + sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes) if !sctpLoadedAtStart && sctpLoadedAtEnd { framework.Failf("The state of the sctp module has changed due to the test case") } @@ -3841,14 +3841,14 @@ var _ = common.SIGDescribe("Services", func() { // affinity test for non-load-balancer services. Session affinity will be // enabled when the service is created and a short timeout will be configured so // session affinity must change after the timeout expirese. -func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs clientset.Interface, svc *v1.Service) { +func execAffinityTestForSessionAffinityTimeout(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) { ns := f.Namespace.Name numPods, servicePort, serviceName := 3, defaultServeHostnameServicePort, svc.ObjectMeta.Name ginkgo.By("creating service in namespace " + ns) serviceType := svc.Spec.Type // set an affinity timeout equal to the number of connection requests svcSessionAffinityTimeout := int32(AffinityConfirmCount) - if proxyMode, err := proxyMode(f); err == nil { + if proxyMode, err := proxyMode(ctx, f); err == nil { if proxyMode == "ipvs" { // session affinity timeout must be greater than 120 in ipvs mode, // because IPVS module has a hardcoded TIME_WAIT timeout of 120s, @@ -3864,15 +3864,15 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout}, } - _, _, err := StartServeHostnameService(cs, svc, ns, numPods) + _, _, err := StartServeHostnameService(ctx, cs, svc, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) ginkgo.DeferCleanup(StopServeHostnameService, cs, ns, serviceName) jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{}) + svc, err = jig.Client.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns) var svcIP string if serviceType == v1.ServiceTypeNodePort { - nodes, err := e2enode.GetReadySchedulableNodes(cs) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) // The node addresses must have the same IP family as the ClusterIP family := v1.IPv4Protocol @@ -3886,17 +3886,17 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client svcIP = svc.Spec.ClusterIP } - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod-affinity", nil) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Cleaning up the exec pod") err := cs.CoreV1().Pods(ns).Delete(ctx, execPod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns) }) - err = jig.CheckServiceReachability(svc, execPod) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) 
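The ctx → ctxUntil rename in the watch hunks above is worth a note: the derived 30-second context is given its own name so it does not shadow the spec-wide ctx that later API calls still need, and it is now derived from ctx rather than context.Background(), so an aborted spec also cancels the watch. Below is a compact, self-contained sketch of that shape (not code from the patch; the function name, label selector, and timeout are illustrative only).

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForEndpointAdded sketches the ctx/ctxUntil split: the 30s deadline is
// scoped to the watch via ctxUntil, while ctx (from Ginkgo) still governs
// everything else and aborts the watch early if the spec is interrupted.
func waitForEndpointAdded(ctx context.Context, cs kubernetes.Interface, ns, resourceVersion string) error {
	w := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.LabelSelector = "test-endpoint-static=true"
			return cs.CoreV1().Endpoints(ns).Watch(ctx, options)
		},
	}
	ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	_, err := watchtools.Until(ctxUntil, resourceVersion, w, func(event watch.Event) (bool, error) {
		// Stop once an Endpoints object shows up as an Added event.
		_, ok := event.Object.(*v1.Endpoints)
		return ok && event.Type == watch.Added, nil
	})
	return err
}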
// the service should be sticky until the timeout expires - framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, true), true) + framework.ExpectEqual(checkAffinity(ctx, cs, execPod, svcIP, servicePort, true), true) // but it should return different hostnames after the timeout expires // try several times to avoid the probability that we hit the same pod twice hosts := sets.NewString() @@ -3926,12 +3926,12 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client framework.Fail("Session is sticky after reaching the timeout") } -func execAffinityTestForNonLBServiceWithTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service) { - execAffinityTestForNonLBServiceWithOptionalTransition(f, cs, svc, true) +func execAffinityTestForNonLBServiceWithTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) { + execAffinityTestForNonLBServiceWithOptionalTransition(ctx, f, cs, svc, true) } -func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interface, svc *v1.Service) { - execAffinityTestForNonLBServiceWithOptionalTransition(f, cs, svc, false) +func execAffinityTestForNonLBService(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) { + execAffinityTestForNonLBServiceWithOptionalTransition(ctx, f, cs, svc, false) } // execAffinityTestForNonLBServiceWithOptionalTransition is a helper function that wrap the logic of @@ -3939,21 +3939,21 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf // enabled when the service is created. If parameter isTransitionTest is true, // session affinity will be switched off/on and test if the service converges // to a stable affinity state. -func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) { +func execAffinityTestForNonLBServiceWithOptionalTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) { ns := f.Namespace.Name numPods, servicePort, serviceName := 3, defaultServeHostnameServicePort, svc.ObjectMeta.Name ginkgo.By("creating service in namespace " + ns) serviceType := svc.Spec.Type svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP - _, _, err := StartServeHostnameService(cs, svc, ns, numPods) + _, _, err := StartServeHostnameService(ctx, cs, svc, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) ginkgo.DeferCleanup(StopServeHostnameService, cs, ns, serviceName) jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{}) + svc, err = jig.Client.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns) var svcIP string if serviceType == v1.ServiceTypeNodePort { - nodes, err := e2enode.GetReadySchedulableNodes(cs) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) // The node addresses must have the same IP family as the ClusterIP family := v1.IPv4Protocol @@ -3967,89 +3967,89 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor svcIP = svc.Spec.ClusterIP } - execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) + execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod-affinity", 
nil) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("Cleaning up the exec pod") err := cs.CoreV1().Pods(ns).Delete(ctx, execPod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns) }) - err = jig.CheckServiceReachability(svc, execPod) + err = jig.CheckServiceReachability(ctx, svc, execPod) framework.ExpectNoError(err) if !isTransitionTest { - framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, true), true) + framework.ExpectEqual(checkAffinity(ctx, cs, execPod, svcIP, servicePort, true), true) } if isTransitionTest { - _, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) framework.ExpectNoError(err) - framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, false), true) - _, err = jig.UpdateService(func(svc *v1.Service) { + framework.ExpectEqual(checkAffinity(ctx, cs, execPod, svcIP, servicePort, false), true) + _, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) framework.ExpectNoError(err) - framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, true), true) + framework.ExpectEqual(checkAffinity(ctx, cs, execPod, svcIP, servicePort, true), true) } } -func execAffinityTestForLBServiceWithTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service) { - execAffinityTestForLBServiceWithOptionalTransition(f, cs, svc, true) +func execAffinityTestForLBServiceWithTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) { + execAffinityTestForLBServiceWithOptionalTransition(ctx, f, cs, svc, true) } -func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface, svc *v1.Service) { - execAffinityTestForLBServiceWithOptionalTransition(f, cs, svc, false) +func execAffinityTestForLBService(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) { + execAffinityTestForLBServiceWithOptionalTransition(ctx, f, cs, svc, false) } // execAffinityTestForLBServiceWithOptionalTransition is a helper function that wrap the logic of // affinity test for load balancer services, similar to // execAffinityTestForNonLBServiceWithOptionalTransition. 
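The helper signatures below follow the same convention as the exec-pod helpers above: ctx becomes the first parameter and is handed to every client call, while cleanup is registered with ginkgo.DeferCleanup using a callback that takes its own ctx parameter instead of capturing the spec's. The helper startProbePod in this sketch is hypothetical (it is not a function in this file) and only mirrors that convention.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// startProbePod is a hypothetical helper: ctx first, then the client and
// test-specific arguments, with cleanup registered via DeferCleanup.
func startProbePod(ctx context.Context, cs kubernetes.Interface, ns, name string) {
	pod := e2epod.NewAgnhostPod(ns, name, nil, nil, nil)
	_, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "failed to create pod %s/%s", ns, name)
	ginkgo.DeferCleanup(func(ctx context.Context) {
		// The cleanup callback receives its own ctx from Ginkgo, so the
		// deletion is not tied to the possibly already-cancelled spec context.
		err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
		framework.ExpectNoError(err, "failed to delete pod %s/%s", ns, name)
	})
}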
-func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) { +func execAffinityTestForLBServiceWithOptionalTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) { numPods, ns, serviceName := 3, f.Namespace.Name, svc.ObjectMeta.Name ginkgo.By("creating service in namespace " + ns) svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP - _, _, err := StartServeHostnameService(cs, svc, ns, numPods) + _, _, err := StartServeHostnameService(ctx, cs, svc, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) jig := e2eservice.NewTestJig(cs, ns, serviceName) ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName) - svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) + svc, err = jig.WaitForLoadBalancer(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { - podNodePairs, err := e2enode.PodNodePairs(cs, ns) + podNodePairs, err := e2enode.PodNodePairs(ctx, cs, ns) framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err) - StopServeHostnameService(cs, ns, serviceName) + _ = StopServeHostnameService(ctx, cs, ns, serviceName) lb := cloudprovider.DefaultLoadBalancerName(svc) framework.Logf("cleaning load balancer resource for %s", lb) - e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) + e2eservice.CleanupServiceResources(ctx, cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) }) ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) port := int(svc.Spec.Ports[0].Port) if !isTransitionTest { - framework.ExpectEqual(checkAffinity(cs, nil, ingressIP, port, true), true) + framework.ExpectEqual(checkAffinity(ctx, cs, nil, ingressIP, port, true), true) } if isTransitionTest { - svc, err = jig.UpdateService(func(svc *v1.Service) { + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) framework.ExpectNoError(err) - framework.ExpectEqual(checkAffinity(cs, nil, ingressIP, port, false), true) - svc, err = jig.UpdateService(func(svc *v1.Service) { + framework.ExpectEqual(checkAffinity(ctx, cs, nil, ingressIP, port, false), true) + svc, err = jig.UpdateService(ctx, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) framework.ExpectNoError(err) - framework.ExpectEqual(checkAffinity(cs, nil, ingressIP, port, true), true) + framework.ExpectEqual(checkAffinity(ctx, cs, nil, ingressIP, port, true), true) } } -func createAndGetExternalServiceFQDN(cs clientset.Interface, ns, serviceName string) string { - _, _, err := StartServeHostnameService(cs, getServeHostnameService(serviceName), ns, 2) +func createAndGetExternalServiceFQDN(ctx context.Context, cs clientset.Interface, ns, serviceName string) string { + _, _, err := StartServeHostnameService(ctx, cs, getServeHostnameService(serviceName), ns, 2) framework.ExpectNoError(err, "Expected Service %s to be running", serviceName) return fmt.Sprintf("%s.%s.svc.%s", serviceName, ns, framework.TestContext.ClusterDNSDomain) } -func createPausePodDeployment(cs clientset.Interface, name, ns string, replicas int) *appsv1.Deployment { +func createPausePodDeployment(ctx context.Context, cs 
clientset.Interface, name, ns string, replicas int) *appsv1.Deployment { labels := map[string]string{"deployment": "agnhost-pause"} pauseDeployment := e2edeployment.NewDeployment(name, int32(replicas), labels, "", "", appsv1.RollingUpdateDeploymentStrategyType) @@ -4066,30 +4066,30 @@ func createPausePodDeployment(cs clientset.Interface, name, ns string, replicas }, } - deployment, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), pauseDeployment, metav1.CreateOptions{}) + deployment, err := cs.AppsV1().Deployments(ns).Create(ctx, pauseDeployment, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error in creating deployment for pause pod") return deployment } // createPodOrFail creates a pod with the specified containerPorts. -func createPodOrFail(f *framework.Framework, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort, args ...string) { +func createPodOrFail(ctx context.Context, f *framework.Framework, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort, args ...string) { ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns)) pod := e2epod.NewAgnhostPod(ns, name, nil, nil, containerPorts, args...) pod.ObjectMeta.Labels = labels // Add a dummy environment variable to work around a docker issue. // https://github.com/docker/docker/issues/14203 pod.Spec.Containers[0].Env = []v1.EnvVar{{Name: "FOO", Value: " "}} - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) } // launchHostExecPod launches a hostexec pod in the given namespace and waits // until it's Running -func launchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { +func launchHostExecPod(ctx context.Context, client clientset.Interface, ns, name string) *v1.Pod { framework.Logf("Creating new host exec pod") hostExecPod := e2epod.NewExecPodSpec(ns, name, true) - pod, err := client.CoreV1().Pods(ns).Create(context.TODO(), hostExecPod, metav1.CreateOptions{}) + pod, err := client.CoreV1().Pods(ns).Create(ctx, hostExecPod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodReadyInNamespace(client, name, ns, framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, client, name, ns, framework.PodStartTimeout) framework.ExpectNoError(err) return pod } @@ -4114,10 +4114,10 @@ func checkReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n } // proxyMode returns a proxyMode of a kube-proxy. 
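// Minimal sketch, not part of the patch, of the context.TODO() -> ctx replacement
// applied in the pod helpers above: the helper takes the Ginkgo-provided context
// and forwards it to every client-go call, so both requests abort when the spec is
// interrupted. The function name createAndFetchPod is hypothetical.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func createAndFetchPod(ctx context.Context, c clientset.Interface, ns string, pod *v1.Pod) (*v1.Pod, error) {
	created, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	// Re-read the object with the same context instead of context.TODO().
	return c.CoreV1().Pods(ns).Get(ctx, created.Name, metav1.GetOptions{})
}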
-func proxyMode(f *framework.Framework) (string, error) { +func proxyMode(ctx context.Context, f *framework.Framework) (string, error) { pod := e2epod.NewAgnhostPod(f.Namespace.Name, "kube-proxy-mode-detector", nil, nil, nil) pod.Spec.HostNetwork = true - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, pod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode" @@ -4158,10 +4158,10 @@ func validatePorts(ep, expectedEndpoints portsByPodUID) error { return nil } -func translatePodNameToUID(c clientset.Interface, ns string, expectedEndpoints portsByPodName) (portsByPodUID, error) { +func translatePodNameToUID(ctx context.Context, c clientset.Interface, ns string, expectedEndpoints portsByPodName) (portsByPodUID, error) { portsByUID := make(portsByPodUID) for name, portList := range expectedEndpoints { - pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) } @@ -4171,9 +4171,9 @@ func translatePodNameToUID(c clientset.Interface, ns string, expectedEndpoints p } // validateEndpointsPortsOrFail validates that the given service exists and is served by the given expectedEndpoints. -func validateEndpointsPortsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints portsByPodName) { +func validateEndpointsPortsOrFail(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectedEndpoints portsByPodName) { ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) - expectedPortsByPodUID, err := translatePodNameToUID(c, namespace, expectedEndpoints) + expectedPortsByPodUID, err := translatePodNameToUID(ctx, c, namespace, expectedEndpoints) framework.ExpectNoError(err, "failed to translate pod name to UID, ns:%s, expectedEndpoints:%v", namespace, expectedEndpoints) var ( @@ -4183,7 +4183,7 @@ func validateEndpointsPortsOrFail(c clientset.Interface, namespace, serviceName if pollErr = wait.PollImmediate(time.Second, framework.ServiceStartTimeout, func() (bool, error) { i++ - ep, err := c.CoreV1().Endpoints(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) + ep, err := c.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { framework.Logf("Failed go get Endpoints object: %v", err) // Retry the error @@ -4203,7 +4203,7 @@ func validateEndpointsPortsOrFail(c clientset.Interface, namespace, serviceName opts := metav1.ListOptions{ LabelSelector: "kubernetes.io/service-name=" + serviceName, } - es, err := c.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), opts) + es, err := c.DiscoveryV1().EndpointSlices(namespace).List(ctx, opts) if err != nil { framework.Logf("Failed go list EndpointSlice objects: %v", err) // Retry the error @@ -4221,7 +4221,7 @@ func validateEndpointsPortsOrFail(c clientset.Interface, namespace, serviceName serviceName, namespace, expectedEndpoints) return true, nil }); pollErr != nil { - if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}); err == nil { + if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}); err == nil 
{ for _, pod := range pods.Items { framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) } @@ -4232,22 +4232,22 @@ func validateEndpointsPortsOrFail(c clientset.Interface, namespace, serviceName framework.ExpectNoError(pollErr, "error waithing for service %s in namespace %s to expose endpoints %v: %v", serviceName, namespace, expectedEndpoints) } -func restartApiserver(namespace string, cs clientset.Interface) error { +func restartApiserver(ctx context.Context, namespace string, cs clientset.Interface) error { if framework.ProviderIs("gke") { // GKE use a same-version master upgrade to teardown/recreate master. v, err := cs.Discovery().ServerVersion() if err != nil { return err } - return e2eproviders.MasterUpgradeGKE(namespace, v.GitVersion[1:]) // strip leading 'v' + return e2eproviders.MasterUpgradeGKE(ctx, namespace, v.GitVersion[1:]) // strip leading 'v' } - return restartComponent(cs, kubeAPIServerLabelName, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName}) + return restartComponent(ctx, cs, kubeAPIServerLabelName, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName}) } // restartComponent restarts component static pod -func restartComponent(cs clientset.Interface, cName, ns string, matchLabels map[string]string) error { - pods, err := e2epod.GetPods(cs, ns, matchLabels) +func restartComponent(ctx context.Context, cs clientset.Interface, cName, ns string, matchLabels map[string]string) error { + pods, err := e2epod.GetPods(ctx, cs, ns, matchLabels) if err != nil { return fmt.Errorf("failed to get %s's pods, err: %v", cName, err) } @@ -4255,11 +4255,11 @@ func restartComponent(cs clientset.Interface, cName, ns string, matchLabels map[ return fmt.Errorf("%s pod count is 0", cName) } - if err := e2epod.DeletePodsWithGracePeriod(cs, pods, 0); err != nil { + if err := e2epod.DeletePodsWithGracePeriod(ctx, cs, pods, 0); err != nil { return fmt.Errorf("failed to restart component: %s, err: %v", cName, err) } - _, err = e2epod.PodsCreatedByLabel(cs, ns, cName, int32(len(pods)), labels.SelectorFromSet(matchLabels)) + _, err = e2epod.PodsCreatedByLabel(ctx, cs, ns, cName, int32(len(pods)), labels.SelectorFromSet(matchLabels)) return err } diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index cb5345a390a..e80d394e0ee 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -90,7 +90,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() { f.ClientSet = kubernetes.NewForConfigOrDie(cfg) failing := sets.NewString() - d, err := runServiceLatencies(f, parallelTrials, totalTrials, acceptableFailureRatio) + d, err := runServiceLatencies(ctx, f, parallelTrials, totalTrials, acceptableFailureRatio) if err != nil { failing.Insert(fmt.Sprintf("Not all RC/pod/service trials succeeded: %v", err)) } @@ -134,7 +134,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() { }) }) -func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) { +func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) { cfg := testutils.RCConfig{ Client: f.ClientSet, Image: imageutils.GetPauseImageName(), @@ -143,7 +143,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab Replicas: 1, PollInterval: 
time.Second, } - if err := e2erc.RunRC(cfg); err != nil { + if err := e2erc.RunRC(ctx, cfg); err != nil { return nil, err } @@ -152,12 +152,13 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab // consumes the endpoints data, so it seems like the right thing to // test. endpointQueries := newQuerier() - startEndpointWatcher(f, endpointQueries) + startEndpointWatcher(ctx, f, endpointQueries) defer close(endpointQueries.stop) // run one test and throw it away-- this is to make sure that the pod's // ready status has propagated. - singleServiceLatency(f, cfg.Name, endpointQueries) + _, err = singleServiceLatency(ctx, f, cfg.Name, endpointQueries) + framework.ExpectNoError(err) // These channels are never closed, and each attempt sends on exactly // one of these channels, so the sum of the things sent over them will @@ -171,7 +172,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab defer ginkgo.GinkgoRecover() blocker <- struct{}{} defer func() { <-blocker }() - if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil { + if d, err := singleServiceLatency(ctx, f, cfg.Name, endpointQueries); err != nil { errs <- err } else { durations <- d @@ -295,15 +296,15 @@ func (eq *endpointQueries) added(e *v1.Endpoints) { } // blocks until it has finished syncing. -func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { +func startEndpointWatcher(ctx context.Context, f *framework.Framework, q *endpointQueries) { _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - obj, err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).List(context.TODO(), options) + obj, err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Watch(ctx, options) }, }, &v1.Endpoints{}, @@ -334,7 +335,7 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { } } -func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) { +func singleServiceLatency(ctx context.Context, f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) { // Make a service that points to that pod. 
svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -348,7 +349,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie }, } startTime := time.Now() - gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) + gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { return 0, err } diff --git a/test/e2e/network/topology_hints.go b/test/e2e/network/topology_hints.go index 4bbb25b15a4..0424aa46380 100644 --- a/test/e2e/network/topology_hints.go +++ b/test/e2e/network/topology_hints.go @@ -47,9 +47,9 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { // filled in BeforeEach var c clientset.Interface - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { c = f.ClientSet - e2eskipper.SkipUnlessMultizone(c) + e2eskipper.SkipUnlessMultizone(ctx, c) }) ginkgo.It("should distribute endpoints evenly", func(ctx context.Context) { @@ -58,10 +58,10 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { img := imageutils.GetE2EImage(imageutils.Agnhost) ports := []v1.ContainerPort{{ContainerPort: int32(portNum)}} dsConf := e2edaemonset.NewDaemonSet("topology-serve-hostname", img, thLabels, nil, nil, ports, "serve-hostname") - ds, err := c.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), dsConf, metav1.CreateOptions{}) + ds, err := c.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, dsConf, metav1.CreateOptions{}) framework.ExpectNoError(err, "error creating DaemonSet") - svc := createServiceReportErr(c, f.Namespace.Name, &v1.Service{ + svc := createServiceReportErr(ctx, c, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "topology-hints", Annotations: map[string]string{ @@ -80,18 +80,18 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { }, }) - err = wait.Poll(5*time.Second, framework.PodStartTimeout, func() (bool, error) { - return e2edaemonset.CheckRunningOnAllNodes(f, ds) + err = wait.PollWithContext(ctx, 5*time.Second, framework.PodStartTimeout, func(ctx context.Context) (bool, error) { + return e2edaemonset.CheckRunningOnAllNodes(ctx, f, ds) }) framework.ExpectNoError(err, "timed out waiting for DaemonSets to be ready") // All Nodes should have same allocatable CPUs. If not, then skip the test. 
schedulableNodes := map[string]*v1.Node{} - for _, nodeName := range e2edaemonset.SchedulableNodes(c, ds) { + for _, nodeName := range e2edaemonset.SchedulableNodes(ctx, c, ds) { schedulableNodes[nodeName] = nil } - nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Error when listing all Nodes") var lastNodeCPU resource.Quantity firstNode := true @@ -118,8 +118,8 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { framework.Logf("Waiting for %d endpoints to be tracked in EndpointSlices", len(schedulableNodes)) var finalSlices []discoveryv1.EndpointSlice - err = wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) { - slices, listErr := c.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svc.Name)}) + err = wait.PollWithContext(ctx, 5*time.Second, 3*time.Minute, func(ctx context.Context) (bool, error) { + slices, listErr := c.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svc.Name)}) if listErr != nil { return false, listErr } @@ -165,7 +165,7 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { } } - podList, err := c.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) podsByZone := map[string]string{} for _, pod := range podList.Items { @@ -184,14 +184,14 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do sleep 1; echo "Date: $(date) Try: ${i}"; curl -q -s --connect-timeout 2 http://%s:80/ ; echo; done`, svc.Name) clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd} clientPod.Spec.Containers[0].Name = clientPod.Name - e2epod.NewPodClient(f).CreateSync(clientPod) + e2epod.NewPodClient(f).CreateSync(ctx, clientPod) framework.Logf("Ensuring that requests from %s pod on %s node stay in %s zone", clientPod.Name, nodeName, fromZone) var logs string - if pollErr := wait.Poll(5*time.Second, e2eservice.KubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollWithContext(ctx, 5*time.Second, e2eservice.KubeProxyLagTimeout, func(ctx context.Context) (bool, error) { var err error - logs, err = e2epod.GetPodLogs(c, f.Namespace.Name, clientPod.Name, clientPod.Name) + logs, err = e2epod.GetPodLogs(ctx, c, f.Namespace.Name, clientPod.Name, clientPod.Name) framework.ExpectNoError(err) framework.Logf("Pod client logs: %s", logs) diff --git a/test/e2e/network/util.go b/test/e2e/network/util.go index 8082357833e..49889a7ad11 100644 --- a/test/e2e/network/util.go +++ b/test/e2e/network/util.go @@ -18,6 +18,7 @@ package network import ( "bytes" + "context" "fmt" "net" "regexp" @@ -58,10 +59,10 @@ func GetHTTPContent(host string, port int, timeout time.Duration, url string) (s } // GetHTTPContentFromTestContainer returns the content of the given url by HTTP via a test container. 
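// Sketch of the wait.Poll -> wait.PollWithContext conversion shown in the
// topology-hints hunk above; not part of the patch. The poll loop stops on timeout
// or when ctx is canceled, and the condition callback reuses the same ctx for its
// List call. The helper name waitForEndpointSlices is hypothetical.
package example

import (
	"context"
	"time"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

func waitForEndpointSlices(ctx context.Context, c clientset.Interface, ns, svcName string, want int) error {
	return wait.PollWithContext(ctx, 5*time.Second, 3*time.Minute, func(ctx context.Context) (bool, error) {
		slices, err := c.DiscoveryV1().EndpointSlices(ns).List(ctx, metav1.ListOptions{
			LabelSelector: discoveryv1.LabelServiceName + "=" + svcName,
		})
		if err != nil {
			return false, err
		}
		return len(slices.Items) >= want, nil
	})
}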
-func GetHTTPContentFromTestContainer(config *e2enetwork.NetworkingTestConfig, host string, port int, timeout time.Duration, dialCmd string) (string, error) { +func GetHTTPContentFromTestContainer(ctx context.Context, config *e2enetwork.NetworkingTestConfig, host string, port int, timeout time.Duration, dialCmd string) (string, error) { var body string pollFn := func() (bool, error) { - resp, err := config.GetResponseFromTestContainer("http", dialCmd, host, port) + resp, err := config.GetResponseFromTestContainer(ctx, "http", dialCmd, host, port) if err != nil || len(resp.Errors) > 0 || len(resp.Responses) == 0 { return false, nil } @@ -87,14 +88,14 @@ func DescribeSvc(ns string) { // For security reasons, and also to allow clusters to use userspace SCTP implementations, // we require that just creating an SCTP Pod/Service/NetworkPolicy must not do anything // that would cause the sctp kernel module to be loaded. -func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool { +func CheckSCTPModuleLoadedOnNodes(ctx context.Context, f *framework.Framework, nodes *v1.NodeList) bool { hostExec := utils.NewHostExec(f) ginkgo.DeferCleanup(hostExec.Cleanup) re := regexp.MustCompile(`^\s*sctp\s+`) cmd := "lsmod | grep sctp" for _, node := range nodes.Items { framework.Logf("Executing cmd %q on node %v", cmd, node.Name) - result, err := hostExec.IssueCommandWithResult(cmd, &node) + result, err := hostExec.IssueCommandWithResult(ctx, cmd, &node) if err != nil { framework.Logf("sctp module is not loaded or error occurred while executing command %s on node: %v", cmd, err) } @@ -181,7 +182,7 @@ func execHostnameTest(sourcePod v1.Pod, targetAddr, targetHostname string) { } // createSecondNodePortService creates a service with the same selector as config.NodePortService and same HTTP Port -func createSecondNodePortService(f *framework.Framework, config *e2enetwork.NetworkingTestConfig) (*v1.Service, int) { +func createSecondNodePortService(ctx context.Context, f *framework.Framework, config *e2enetwork.NetworkingTestConfig) (*v1.Service, int) { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: secondNodePortSvcName, @@ -200,9 +201,9 @@ func createSecondNodePortService(f *framework.Framework, config *e2enetwork.Netw }, } - createdService := config.CreateService(svc) + createdService := config.CreateService(ctx, svc) - err := framework.WaitForServiceEndpointsNum(f.ClientSet, config.Namespace, secondNodePortSvcName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) + err := framework.WaitForServiceEndpointsNum(ctx, f.ClientSet, config.Namespace, secondNodePortSvcName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", secondNodePortSvcName, config.Namespace) var httpPort int diff --git a/test/e2e/node/apparmor.go b/test/e2e/node/apparmor.go index d3276464b24..8ea55ad4334 100644 --- a/test/e2e/node/apparmor.go +++ b/test/e2e/node/apparmor.go @@ -34,23 +34,23 @@ var _ = SIGDescribe("AppArmor", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("load AppArmor profiles", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipIfAppArmorNotSupported() - e2esecurity.LoadAppArmorProfiles(f.Namespace.Name, f.ClientSet) + e2esecurity.LoadAppArmorProfiles(ctx, f.Namespace.Name, f.ClientSet) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if 
!ginkgo.CurrentSpecReport().Failed() { return } - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) }) ginkgo.It("should enforce an AppArmor profile", func(ctx context.Context) { - e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true) + e2esecurity.CreateAppArmorTestPod(ctx, f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true) }) ginkgo.It("can disable an AppArmor profile, using unconfined", func(ctx context.Context) { - e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), true, true) + e2esecurity.CreateAppArmorTestPod(ctx, f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), true, true) }) }) }) diff --git a/test/e2e/node/crictl.go b/test/e2e/node/crictl.go index 42d80339699..022bbe0f873 100644 --- a/test/e2e/node/crictl.go +++ b/test/e2e/node/crictl.go @@ -39,7 +39,7 @@ var _ = SIGDescribe("crictl", func() { }) ginkgo.It("should be able to run crictl on the node", func(ctx context.Context) { - nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, maxNodes) framework.ExpectNoError(err) testCases := []string{ @@ -53,7 +53,7 @@ var _ = SIGDescribe("crictl", func() { for _, node := range nodes.Items { ginkgo.By(fmt.Sprintf("Testing %q on node %q ", testCase, node.GetName())) - res, err := hostExec.Execute(testCase, &node) + res, err := hostExec.Execute(ctx, testCase, &node) framework.ExpectNoError(err) if res.Stdout == "" && res.Stderr == "" { diff --git a/test/e2e/node/events.go b/test/e2e/node/events.go index 69015399317..d9c0fc35b89 100644 --- a/test/e2e/node/events.go +++ b/test/e2e/node/events.go @@ -70,21 +70,21 @@ var _ = SIGDescribe("Events", func() { ginkgo.By("deleting the pod") return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{}) }) - if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { + if _, err := podClient.Create(ctx, pod, metav1.CreateOptions{}); err != nil { framework.Failf("Failed to create pod: %v", err) } - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) ginkgo.By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) framework.ExpectNoError(err) framework.ExpectEqual(len(pods.Items), 1) ginkgo.By("retrieving the pod") - podWithUID, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + podWithUID, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get pod: %v", err) } @@ -100,7 +100,7 @@ var _ = SIGDescribe("Events", func() { "source": v1.DefaultSchedulerName, }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), options) + events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, options) if err != nil { return false, err } @@ -120,7 +120,7 @@ var _ = SIGDescribe("Events", func() { "source": "kubelet", }.AsSelector().String() options := 
metav1.ListOptions{FieldSelector: selector} - events, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), options) + events, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, options) if err != nil { return false, err } diff --git a/test/e2e/node/examples.go b/test/e2e/node/examples.go index 0cdbae3049a..c49c9c80732 100644 --- a/test/e2e/node/examples.go +++ b/test/e2e/node/examples.go @@ -51,17 +51,17 @@ var _ = SIGDescribe("[Feature:Example]", func() { var c clientset.Interface var ns string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { c = f.ClientSet ns = f.Namespace.Name // this test wants powerful permissions. Since the namespace names are unique, we can leave this // lying around so we don't have to race any caches - err := e2eauth.BindClusterRoleInNamespace(c.RbacV1(), "edit", f.Namespace.Name, + err := e2eauth.BindClusterRoleInNamespace(ctx, c.RbacV1(), "edit", f.Namespace.Name, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) framework.ExpectNoError(err) - err = e2eauth.WaitForAuthorizationUpdate(c.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(ctx, c.AuthorizationV1(), serviceaccount.MakeUsername(f.Namespace.Name, "default"), f.Namespace.Name, "create", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) @@ -80,10 +80,10 @@ var _ = SIGDescribe("[Feature:Example]", func() { var wg sync.WaitGroup passed := true checkRestart := func(podName string, timeout time.Duration) { - err := e2epod.WaitForPodNameRunningInNamespace(c, podName, ns) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, c, podName, ns) framework.ExpectNoError(err) for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { - pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) @@ -125,7 +125,7 @@ var _ = SIGDescribe("[Feature:Example]", func() { ginkgo.By("creating secret and pod") e2ekubectl.RunKubectlOrDieInput(ns, secretYaml, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") - err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns) + err := e2epod.WaitForPodNoLongerRunningInNamespace(ctx, c, podName, ns) framework.ExpectNoError(err) ginkgo.By("checking if secret was read correctly") @@ -142,7 +142,7 @@ var _ = SIGDescribe("[Feature:Example]", func() { ginkgo.By("creating the pod") e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") - err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns) + err := e2epod.WaitForPodNoLongerRunningInNamespace(ctx, c, podName, ns) framework.ExpectNoError(err) ginkgo.By("checking if name and namespace were passed correctly") diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index b7507c645cf..b34853dfb9a 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -57,10 +57,10 @@ const ( // getPodMatches returns a set of pod names on the given node that matches the // podNamePrefix and namespace. 
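// Sketch, not part of the patch, of the setup/spec signature change shown in the
// hunks above: BeforeEach and It callbacks accept a context.Context, and that
// context is what gets handed to helpers instead of context.TODO(). The suite
// variable c and the spec body are hypothetical.
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

var c clientset.Interface // assumed to be filled in by the suite's framework setup

var _ = ginkgo.Describe("context plumbing", func() {
	ginkgo.BeforeEach(func(ctx context.Context) {
		// Setup that talks to the API server uses the per-spec context.
		_, _ = c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
	})

	ginkgo.It("passes ctx to helpers", func(ctx context.Context) {
		_, err := c.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
		if err != nil {
			ginkgo.Fail(err.Error())
		}
	})
})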
-func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String { +func getPodMatches(ctx context.Context, c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String { matches := sets.NewString() framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName) - runningPods, err := e2ekubelet.GetKubeletPods(c, nodeName) + runningPods, err := e2ekubelet.GetKubeletPods(ctx, c, nodeName) if err != nil { framework.Logf("Error checking running pods on %v: %v", nodeName, err) return matches @@ -81,14 +81,14 @@ func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, // information; they are reconstructed by examining the container runtime. In // the scope of this test, we do not expect pod naming conflicts so // podNamePrefix should be sufficient to identify the pods. -func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error { - return wait.Poll(pollInterval, timeout, func() (bool, error) { +func waitTillNPodsRunningOnNodes(ctx context.Context, c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error { + return wait.PollWithContext(ctx, pollInterval, timeout, func(ctx context.Context) (bool, error) { matchCh := make(chan sets.String, len(nodeNames)) for _, item := range nodeNames.List() { // Launch a goroutine per node to check the pods running on the nodes. nodeName := item go func() { - matchCh <- getPodMatches(c, nodeName, podNamePrefix, namespace) + matchCh <- getPodMatches(ctx, c, nodeName, podNamePrefix, namespace) }() } @@ -125,7 +125,7 @@ func stopNfsServer(serverPod *v1.Pod) { // Creates a pod that mounts an nfs volume that is served by the nfs-server pod. The container // will execute the passed in shell cmd. Waits for the pod to start. // Note: the nfs plugin is defined inline, no PV or PVC. -func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod { +func createPodUsingNfs(ctx context.Context, f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod { ginkgo.By("create pod using nfs volume") isPrivileged := true @@ -172,13 +172,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, }, }, } - rtnPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + rtnPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, rtnPod.Name, f.Namespace.Name, framework.PodStartTimeout) // running & ready + err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, rtnPod.Name, f.Namespace.Name, framework.PodStartTimeout) // running & ready framework.ExpectNoError(err) - rtnPod, err = c.CoreV1().Pods(ns).Get(context.TODO(), rtnPod.Name, metav1.GetOptions{}) // return fresh pod + rtnPod, err = c.CoreV1().Pods(ns).Get(ctx, rtnPod.Name, metav1.GetOptions{}) // return fresh pod framework.ExpectNoError(err) return rtnPod } @@ -186,8 +186,8 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, // getHostExternalAddress gets the node for a pod and returns the first External // address. Returns an error if the node the pod is on doesn't have an External // address. 
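// Sketch, not part of the patch, of the fan-out pattern used by
// waitTillNPodsRunningOnNodes above: each per-node goroutine receives the same ctx
// so its API call is canceled along with the enclosing poll. countRunningOnNode
// and waitForTotalPods are hypothetical stand-ins for the real helpers.
package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

func countRunningOnNode(ctx context.Context, c clientset.Interface, ns, node string) int {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
		FieldSelector: "spec.nodeName=" + node,
	})
	if err != nil {
		return 0
	}
	return len(pods.Items)
}

func waitForTotalPods(ctx context.Context, c clientset.Interface, ns string, nodes []string, want int) error {
	return wait.PollWithContext(ctx, 10*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
		counts := make(chan int, len(nodes)) // buffered so goroutines never block
		for _, node := range nodes {
			node := node
			go func() { counts <- countRunningOnNode(ctx, c, ns, node) }()
		}
		total := 0
		for range nodes {
			total += <-counts
		}
		return total == want, nil
	})
}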
-func getHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) { - node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{}) +func getHostExternalAddress(ctx context.Context, client clientset.Interface, p *v1.Pod) (externalAddress string, err error) { + node, err := client.CoreV1().Nodes().Get(ctx, p.Spec.NodeName, metav1.GetOptions{}) if err != nil { return "", err } @@ -212,13 +212,13 @@ func getHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddr // `ls ` should fail (since that dir was removed). If expectClean is false then we expect // the node is not cleaned up, and thus cmds like `ls ` should succeed. We wait for the // kubelet to be cleaned up, afterwhich an error is reported. -func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) { +func checkPodCleanup(ctx context.Context, c clientset.Interface, pod *v1.Pod, expectClean bool) { timeout := 5 * time.Minute poll := 20 * time.Second podDir := filepath.Join("/var/lib/kubelet/pods", string(pod.UID)) mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs") // use ip rather than hostname in GCE - nodeIP, err := getHostExternalAddress(c, pod) + nodeIP, err := getHostExternalAddress(ctx, c, pod) framework.ExpectNoError(err) condMsg := "deleted" @@ -244,8 +244,8 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) { for _, test := range tests { framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg) - err = wait.Poll(poll, timeout, func() (bool, error) { - result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider) + err = wait.PollWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { + result, err := e2essh.NodeExec(ctx, nodeIP, test.cmd, framework.TestContext.Provider) framework.ExpectNoError(err) e2essh.LogResult(result) ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0) @@ -296,12 +296,13 @@ var _ = SIGDescribe("kubelet", func() { {podsPerNode: 10, timeout: 1 * time.Minute}, } - ginkgo.BeforeEach(func() { + // Must be called in each It with the context of the test. + start := func(ctx context.Context) { // Use node labels to restrict the pods to be assigned only to the // nodes we observe initially. nodeLabels = make(map[string]string) nodeLabels["kubelet_cleanup"] = "true" - nodes, err := e2enode.GetBoundedReadySchedulableNodes(c, maxNodesToCheck) + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, c, maxNodesToCheck) numNodes = len(nodes.Items) framework.ExpectNoError(err) nodeNames = sets.NewString() @@ -318,27 +319,28 @@ var _ = SIGDescribe("kubelet", func() { // While we only use a bounded number of nodes in the test. We need to know // the actual number of nodes in the cluster, to avoid running resourceMonitor // against large clusters. - actualNodes, err := e2enode.GetReadySchedulableNodes(c) + actualNodes, err := e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) // Start resourceMonitor only in small clusters. 
if len(actualNodes.Items) <= maxNodesToCheck { resourceMonitor = e2ekubelet.NewResourceMonitor(f.ClientSet, e2ekubelet.TargetContainers(), containerStatsPollingInterval) - resourceMonitor.Start() + resourceMonitor.Start(ctx) ginkgo.DeferCleanup(resourceMonitor.Stop) } - }) + } for _, itArg := range deleteTests { name := fmt.Sprintf( "kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout) itArg := itArg ginkgo.It(name, func(ctx context.Context) { + start(ctx) totalPods := itArg.podsPerNode * numNodes ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID())) - err := e2erc.RunRC(testutils.RCConfig{ + err := e2erc.RunRC(ctx, testutils.RCConfig{ Client: f.ClientSet, Name: rcName, Namespace: f.Namespace.Name, @@ -351,14 +353,14 @@ var _ = SIGDescribe("kubelet", func() { // running on the nodes according to kubelet. The timeout is set to // only 30 seconds here because e2erc.RunRC already waited for all pods to // transition to the running status. - err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30) + err = waitTillNPodsRunningOnNodes(ctx, f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30) framework.ExpectNoError(err) if resourceMonitor != nil { resourceMonitor.LogLatest() } ginkgo.By("Deleting the RC") - e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName) + e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, rcName) // Check that the pods really are gone by querying /runningpods on the // node. The /runningpods handler checks the container runtime (or its // cache) and returns a list of running pods. Some possible causes of @@ -367,7 +369,7 @@ var _ = SIGDescribe("kubelet", func() { // - a bug in graceful termination (if it is enabled) // - docker slow to delete pods (or resource problems causing slowness) start := time.Now() - err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout) + err = waitTillNPodsRunningOnNodes(ctx, f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout) framework.ExpectNoError(err) framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames), time.Since(start)) @@ -411,15 +413,15 @@ var _ = SIGDescribe("kubelet", func() { }, } - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) 
- _, nfsServerPod, nfsIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) + _, nfsServerPod, nfsIP = e2evolume.NewNFSServer(ctx, c, ns, []string{"-G", "777", "/exports"}) }) - ginkgo.AfterEach(func() { - err := e2epod.DeletePodWithWait(c, pod) + ginkgo.AfterEach(func(ctx context.Context) { + err := e2epod.DeletePodWithWait(ctx, c, pod) framework.ExpectNoError(err, "AfterEach: Failed to delete client pod ", pod.Name) - err = e2epod.DeletePodWithWait(c, nfsServerPod) + err = e2epod.DeletePodWithWait(ctx, c, nfsServerPod) framework.ExpectNoError(err, "AfterEach: Failed to delete server pod ", nfsServerPod.Name) }) @@ -427,24 +429,24 @@ var _ = SIGDescribe("kubelet", func() { for _, t := range testTbl { t := t ginkgo.It(t.itDescr, func(ctx context.Context) { - pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd) + pod = createPodUsingNfs(ctx, f, c, ns, nfsIP, t.podCmd) ginkgo.By("Stop the NFS server") stopNfsServer(nfsServerPod) ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure") - err := e2epod.DeletePodWithWait(c, pod) + err := e2epod.DeletePodWithWait(ctx, c, pod) framework.ExpectError(err) // pod object is now stale, but is intentionally not nil ginkgo.By("Check if pod's host has been cleaned up -- expect not") - checkPodCleanup(c, pod, false) + checkPodCleanup(ctx, c, pod, false) ginkgo.By("Restart the nfs server") restartNfsServer(nfsServerPod) ginkgo.By("Verify that the deleted client pod is now cleaned up") - checkPodCleanup(c, pod, true) + checkPodCleanup(ctx, c, pod, true) }) } }) @@ -457,8 +459,8 @@ var _ = SIGDescribe("kubelet", func() { nodeNames sets.String ) - ginkgo.BeforeEach(func() { - nodes, err := e2enode.GetBoundedReadySchedulableNodes(c, maxNodesToCheck) + ginkgo.BeforeEach(func(ctx context.Context) { + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, c, maxNodesToCheck) numNodes = len(nodes.Items) framework.ExpectNoError(err) nodeNames = sets.NewString() diff --git a/test/e2e/node/kubelet_perf.go b/test/e2e/node/kubelet_perf.go index dedda995878..cb0ea5ba241 100644 --- a/test/e2e/node/kubelet_perf.go +++ b/test/e2e/node/kubelet_perf.go @@ -54,9 +54,9 @@ type resourceTest struct { memLimits e2ekubelet.ResourceUsagePerContainer } -func logPodsOnNodes(c clientset.Interface, nodeNames []string) { +func logPodsOnNodes(ctx context.Context, c clientset.Interface, nodeNames []string) { for _, n := range nodeNames { - podList, err := e2ekubelet.GetKubeletRunningPods(c, n) + podList, err := e2ekubelet.GetKubeletRunningPods(ctx, c, n) if err != nil { framework.Logf("Unable to retrieve kubelet pods for node %v", n) continue @@ -65,7 +65,7 @@ func logPodsOnNodes(c clientset.Interface, nodeNames []string) { } } -func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *e2ekubelet.ResourceMonitor, +func runResourceTrackingTest(ctx context.Context, f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *e2ekubelet.ResourceMonitor, expectedCPU map[string]map[float64]float64, expectedMemory e2ekubelet.ResourceUsagePerContainer) { numNodes := nodeNames.Len() totalPods := podsPerNode * numNodes @@ -73,7 +73,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID())) // TODO: Use a more realistic workload - err := e2erc.RunRC(testutils.RCConfig{ + err := e2erc.RunRC(ctx, testutils.RCConfig{ Client: f.ClientSet, Name: rcName, Namespace: f.Namespace.Name, @@ -93,7 +93,7 @@ func 
runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames // for the current test duration, but we should reclaim the // entries if we plan to monitor longer (e.g., 8 hours). deadline := time.Now().Add(monitoringTime) - for time.Now().Before(deadline) { + for time.Now().Before(deadline) && ctx.Err() == nil { timeLeft := time.Until(deadline) framework.Logf("Still running...%v left", timeLeft) if timeLeft < reportingPeriod { @@ -101,18 +101,18 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames } else { time.Sleep(reportingPeriod) } - logPodsOnNodes(f.ClientSet, nodeNames.List()) + logPodsOnNodes(ctx, f.ClientSet, nodeNames.List()) } ginkgo.By("Reporting overall resource usage") - logPodsOnNodes(f.ClientSet, nodeNames.List()) + logPodsOnNodes(ctx, f.ClientSet, nodeNames.List()) usageSummary, err := rm.GetLatest() framework.ExpectNoError(err) // TODO(random-liu): Remove the original log when we migrate to new perfdash framework.Logf("%s", rm.FormatResourceUsage(usageSummary)) // Log perf result printPerfData(e2eperf.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary))) - verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary) + verifyMemoryLimits(ctx, f.ClientSet, expectedMemory, usageSummary) cpuSummary := rm.GetCPUSummary() framework.Logf("%s", rm.FormatCPUSummary(cpuSummary)) @@ -121,10 +121,10 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames verifyCPULimits(expectedCPU, cpuSummary) ginkgo.By("Deleting the RC") - e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName) + e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, rcName) } -func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) { +func verifyMemoryLimits(ctx context.Context, c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) { if expected == nil { return } @@ -147,7 +147,7 @@ func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsage } if len(nodeErrs) > 0 { errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", "))) - heapStats, err := e2ekubelet.GetKubeletHeapStats(c, nodeName) + heapStats, err := e2ekubelet.GetKubeletHeapStats(ctx, c, nodeName) if err != nil { framework.Logf("Unable to get heap stats from %q", nodeName) } else { @@ -202,21 +202,21 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { var om *e2ekubelet.RuntimeOperationMonitor var rm *e2ekubelet.ResourceMonitor - ginkgo.BeforeEach(func() { - nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + ginkgo.BeforeEach(func(ctx context.Context) { + nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) nodeNames = sets.NewString() for _, node := range nodes.Items { nodeNames.Insert(node.Name) } - om = e2ekubelet.NewRuntimeOperationMonitor(f.ClientSet) + om = e2ekubelet.NewRuntimeOperationMonitor(ctx, f.ClientSet) rm = e2ekubelet.NewResourceMonitor(f.ClientSet, e2ekubelet.TargetContainers(), containerStatsPollingPeriod) - rm.Start() + rm.Start(ctx) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { rm.Stop() - result := om.GetLatestRuntimeOperationErrorRate() + result := om.GetLatestRuntimeOperationErrorRate(ctx) framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result)) }) ginkgo.Describe("regular resource usage tracking 
[Feature:RegularResourceUsageTracking]", func() { @@ -267,7 +267,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { name := fmt.Sprintf( "resource tracking for %d pods per node", podsPerNode) ginkgo.It(name, func(ctx context.Context) { - runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits) + runResourceTrackingTest(ctx, f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits) }) } }) @@ -278,7 +278,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { name := fmt.Sprintf( "resource tracking for %d pods per node", podsPerNode) ginkgo.It(name, func(ctx context.Context) { - runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil) + runResourceTrackingTest(ctx, f, podsPerNode, nodeNames, rm, nil, nil) }) } }) diff --git a/test/e2e/node/mount_propagation.go b/test/e2e/node/mount_propagation.go index 81fc19793eb..9acd621ea14 100644 --- a/test/e2e/node/mount_propagation.go +++ b/test/e2e/node/mount_propagation.go @@ -94,7 +94,7 @@ var _ = SIGDescribe("Mount propagation", func() { ginkgo.DeferCleanup(hostExec.Cleanup) // Pick a node where all pods will run. - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) // Fail the test if the namespace is not set. We expect that the @@ -110,19 +110,19 @@ var _ = SIGDescribe("Mount propagation", func() { hostDir := "/var/lib/kubelet/" + f.Namespace.Name ginkgo.DeferCleanup(func(ctx context.Context) error { cleanCmd := fmt.Sprintf("rm -rf %q", hostDir) - return hostExec.IssueCommand(cleanCmd, node) + return hostExec.IssueCommand(ctx, cleanCmd, node) }) podClient := e2epod.NewPodClient(f) bidirectional := v1.MountPropagationBidirectional - master := podClient.CreateSync(preparePod("master", node, &bidirectional, hostDir)) + master := podClient.CreateSync(ctx, preparePod("master", node, &bidirectional, hostDir)) hostToContainer := v1.MountPropagationHostToContainer - slave := podClient.CreateSync(preparePod("slave", node, &hostToContainer, hostDir)) + slave := podClient.CreateSync(ctx, preparePod("slave", node, &hostToContainer, hostDir)) none := v1.MountPropagationNone - private := podClient.CreateSync(preparePod("private", node, &none, hostDir)) - defaultPropagation := podClient.CreateSync(preparePod("default", node, nil, hostDir)) + private := podClient.CreateSync(ctx, preparePod("private", node, &none, hostDir)) + defaultPropagation := podClient.CreateSync(ctx, preparePod("default", node, nil, hostDir)) // Check that the pods sees directories of each other. This just checks // that they have the same HostPath, not the mount propagation. @@ -130,14 +130,14 @@ var _ = SIGDescribe("Mount propagation", func() { for _, podName := range podNames { for _, dirName := range podNames { cmd := fmt.Sprintf("test -d /mnt/test/%s", dirName) - e2epod.ExecShellInPod(f, podName, cmd) + e2epod.ExecShellInPod(ctx, f, podName, cmd) } } // Each pod mounts one tmpfs to /mnt/test/ and puts a file there. 
for _, podName := range podNames { cmd := fmt.Sprintf("mount -t tmpfs e2e-mount-propagation-%[1]s /mnt/test/%[1]s; echo %[1]s > /mnt/test/%[1]s/file", podName) - e2epod.ExecShellInPod(f, podName, cmd) + e2epod.ExecShellInPod(ctx, f, podName, cmd) // unmount tmpfs when the test finishes cmd = fmt.Sprintf("umount /mnt/test/%s", podName) @@ -147,12 +147,12 @@ var _ = SIGDescribe("Mount propagation", func() { // The host mounts one tmpfs to testdir/host and puts a file there so we // can check mount propagation from the host to pods. cmd := fmt.Sprintf("mkdir %[1]q/host; mount -t tmpfs e2e-mount-propagation-host %[1]q/host; echo host > %[1]q/host/file", hostDir) - err = hostExec.IssueCommand(cmd, node) + err = hostExec.IssueCommand(ctx, cmd, node) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) error { cmd := fmt.Sprintf("umount %q/host", hostDir) - return hostExec.IssueCommand(cmd, node) + return hostExec.IssueCommand(ctx, cmd, node) }) // Now check that mounts are propagated to the right containers. @@ -172,7 +172,7 @@ var _ = SIGDescribe("Mount propagation", func() { for podName, mounts := range expectedMounts { for _, mountName := range dirNames { cmd := fmt.Sprintf("cat /mnt/test/%s/file", mountName) - stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(f, podName, cmd) + stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, podName, cmd) framework.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err) msg := fmt.Sprintf("When checking pod %s and directory %s", podName, mountName) shouldBeVisible := mounts.Has(mountName) @@ -188,7 +188,7 @@ var _ = SIGDescribe("Mount propagation", func() { // Find the kubelet PID to ensure we're working with the kubelet's mount namespace cmd = "pidof kubelet" - kubeletPid, err := hostExec.IssueCommandWithResult(cmd, node) + kubeletPid, err := hostExec.IssueCommandWithResult(ctx, cmd, node) framework.ExpectNoError(err, "Checking kubelet pid") kubeletPid = strings.TrimSuffix(kubeletPid, "\n") framework.ExpectEqual(strings.Count(kubeletPid, " "), 0, "kubelet should only have a single PID in the system (pidof returned %q)", kubeletPid) @@ -197,7 +197,7 @@ var _ = SIGDescribe("Mount propagation", func() { // Check that the master and host mounts are propagated to the container runtime's mount namespace for _, mountName := range []string{"host", master.Name} { cmd := fmt.Sprintf("%s cat \"%s/%s/file\"", enterKubeletMountNS, hostDir, mountName) - output, err := hostExec.IssueCommandWithResult(cmd, node) + output, err := hostExec.IssueCommandWithResult(ctx, cmd, node) framework.ExpectNoError(err, "host container namespace should see mount from %s: %s", mountName, output) output = strings.TrimSuffix(output, "\n") framework.ExpectEqual(output, mountName, "host container namespace should see mount contents from %s", mountName) @@ -206,7 +206,7 @@ var _ = SIGDescribe("Mount propagation", func() { // Check that the slave, private, and default mounts are not propagated to the container runtime's mount namespace for _, podName := range []string{slave.Name, private.Name, defaultPropagation.Name} { cmd := fmt.Sprintf("%s test ! 
-e \"%s/%s/file\"", enterKubeletMountNS, hostDir, podName) - output, err := hostExec.IssueCommandWithResult(cmd, node) + output, err := hostExec.IssueCommandWithResult(ctx, cmd, node) framework.ExpectNoError(err, "host container namespace shouldn't see mount from %s: %s", podName, output) } }) diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go index 1254bf6f0d3..af5e7914e1f 100644 --- a/test/e2e/node/node_problem_detector.go +++ b/test/e2e/node/node_problem_detector.go @@ -51,17 +51,17 @@ var _ = SIGDescribe("NodeProblemDetector", func() { f := framework.NewDefaultFramework("node-problem-detector") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu") - e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute) + e2enode.WaitForTotalHealthy(ctx, f.ClientSet, time.Minute) }) ginkgo.It("should run without error", func(ctx context.Context) { ginkgo.By("Getting all nodes and their SSH-able IP addresses") - readyNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + readyNodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) nodes := []v1.Node{} @@ -110,7 +110,7 @@ var _ = SIGDescribe("NodeProblemDetector", func() { workingSetStats[host] = []float64{} cmd := "systemctl status node-problem-detector.service" - result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider) isStandaloneMode[host] = (err == nil && result.Code == 0) if isStandaloneMode[host] { @@ -119,14 +119,14 @@ var _ = SIGDescribe("NodeProblemDetector", func() { // showing up, because string text "[n]ode-problem-detector" does not // match regular expression "[n]ode-problem-detector". psCmd := "ps aux | grep [n]ode-problem-detector" - result, err = e2essh.SSH(psCmd, host, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, psCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) framework.ExpectEqual(result.Code, 0) gomega.Expect(result.Stdout).To(gomega.ContainSubstring("node-problem-detector")) ginkgo.By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host)) journalctlCmd := "sudo journalctl -r -u node-problem-detector" - result, err = e2essh.SSH(journalctlCmd, host, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, journalctlCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) framework.ExpectEqual(result.Code, 0) gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed")) @@ -171,7 +171,7 @@ var _ = SIGDescribe("NodeProblemDetector", func() { } } - cpuUsage, uptime := getCPUStat(f, host) + cpuUsage, uptime := getCPUStat(ctx, f, host) cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) uptimeStats[host] = append(uptimeStats[host], uptime) @@ -179,7 +179,7 @@ var _ = SIGDescribe("NodeProblemDetector", func() { ginkgo.By(fmt.Sprintf("Inject log to trigger DockerHung on node %q", host)) log := "INFO: task docker:12345 blocked for more than 120 seconds." 
injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\"" - result, err = e2essh.SSH(injectLogCmd, host, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, injectLogCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) framework.ExpectEqual(result.Code, 0) } @@ -187,14 +187,14 @@ var _ = SIGDescribe("NodeProblemDetector", func() { ginkgo.By("Check node-problem-detector can post conditions and events to API server") for _, node := range nodes { ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name)) - gomega.Eventually(func() error { - return verifyNodeCondition(f, "KernelDeadlock", v1.ConditionTrue, "DockerHung", node.Name) + gomega.Eventually(ctx, func() error { + return verifyNodeCondition(ctx, f, "KernelDeadlock", v1.ConditionTrue, "DockerHung", node.Name) }, pollTimeout, pollInterval).Should(gomega.Succeed()) ginkgo.By(fmt.Sprintf("Check node-problem-detector posted DockerHung event on node %q", node.Name)) eventListOptions := metav1.ListOptions{FieldSelector: fields.Set{"involvedObject.kind": "Node"}.AsSelector().String()} - gomega.Eventually(func() error { - return verifyEvents(f, eventListOptions, 1, "DockerHung", node.Name) + gomega.Eventually(ctx, func(ctx context.Context) error { + return verifyEvents(ctx, f, eventListOptions, 1, "DockerHung", node.Name) }, pollTimeout, pollInterval).Should(gomega.Succeed()) if checkForKubeletStart { @@ -204,8 +204,8 @@ var _ = SIGDescribe("NodeProblemDetector", func() { // // Some test suites run for hours and KubeletStart event will already be cleaned up ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KubeletStart event on node %q", node.Name)) - gomega.Eventually(func() error { - return verifyEventExists(f, eventListOptions, "KubeletStart", node.Name) + gomega.Eventually(ctx, func(ctx context.Context) error { + return verifyEventExists(ctx, f, eventListOptions, "KubeletStart", node.Name) }, pollTimeout, pollInterval).Should(gomega.Succeed()) } else { ginkgo.By("KubeletStart event will NOT be checked") @@ -217,16 +217,16 @@ var _ = SIGDescribe("NodeProblemDetector", func() { for i := 1; i <= numIterations; i++ { for j, host := range hosts { if isStandaloneMode[host] { - rss, workingSet := getMemoryStat(f, host) + rss, workingSet := getMemoryStat(ctx, f, host) rssStats[host] = append(rssStats[host], rss) workingSetStats[host] = append(workingSetStats[host], workingSet) if i == numIterations { - cpuUsage, uptime := getCPUStat(f, host) + cpuUsage, uptime := getCPUStat(ctx, f, host) cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) uptimeStats[host] = append(uptimeStats[host], uptime) } } else { - cpuUsage, rss, workingSet := getNpdPodStat(f, nodes[j].Name) + cpuUsage, rss, workingSet := getNpdPodStat(ctx, f, nodes[j].Name) cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) rssStats[host] = append(rssStats[host], rss) workingSetStats[host] = append(workingSetStats[host], workingSet) @@ -263,8 +263,8 @@ var _ = SIGDescribe("NodeProblemDetector", func() { }) }) -func verifyEvents(f *framework.Framework, options metav1.ListOptions, num int, reason, nodeName string) error { - events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(context.TODO(), options) +func verifyEvents(ctx context.Context, f *framework.Framework, options metav1.ListOptions, num int, reason, nodeName string) error { + events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(ctx, options) if err != nil { return err } @@ 
-281,8 +281,8 @@ func verifyEvents(f *framework.Framework, options metav1.ListOptions, num int, r return nil } -func verifyEventExists(f *framework.Framework, options metav1.ListOptions, reason, nodeName string) error { - events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(context.TODO(), options) +func verifyEventExists(ctx context.Context, f *framework.Framework, options metav1.ListOptions, reason, nodeName string) error { + events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(ctx, options) if err != nil { return err } @@ -294,8 +294,8 @@ func verifyEventExists(f *framework.Framework, options metav1.ListOptions, reaso return fmt.Errorf("Event %s does not exist: %v", reason, events.Items) } -func verifyNodeCondition(f *framework.Framework, condition v1.NodeConditionType, status v1.ConditionStatus, reason, nodeName string) error { - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func verifyNodeCondition(ctx context.Context, f *framework.Framework, condition v1.NodeConditionType, status v1.ConditionStatus, reason, nodeName string) error { + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -309,17 +309,17 @@ func verifyNodeCondition(f *framework.Framework, condition v1.NodeConditionType, return nil } -func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64) { +func getMemoryStat(ctx context.Context, f *framework.Framework, host string) (rss, workingSet float64) { var memCmd string - isCgroupV2 := isHostRunningCgroupV2(f, host) + isCgroupV2 := isHostRunningCgroupV2(ctx, f, host) if isCgroupV2 { memCmd = "cat /sys/fs/cgroup/system.slice/node-problem-detector.service/memory.current && cat /sys/fs/cgroup/system.slice/node-problem-detector.service/memory.stat" } else { memCmd = "cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.usage_in_bytes && cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.stat" } - result, err := e2essh.SSH(memCmd, host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, memCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) framework.ExpectEqual(result.Code, 0) lines := strings.Split(result.Stdout, "\n") @@ -365,15 +365,15 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64 return } -func getCPUStat(f *framework.Framework, host string) (usage, uptime float64) { +func getCPUStat(ctx context.Context, f *framework.Framework, host string) (usage, uptime float64) { var cpuCmd string - if isHostRunningCgroupV2(f, host) { + if isHostRunningCgroupV2(ctx, f, host) { cpuCmd = " cat /sys/fs/cgroup/cpu.stat | grep 'usage_usec' | sed 's/[^0-9]*//g' && cat /proc/uptime | awk '{print $1}'" } else { cpuCmd = "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'" } - result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, cpuCmd, host, framework.TestContext.Provider) framework.ExpectNoError(err) framework.ExpectEqual(result.Code, 0) lines := strings.Split(result.Stdout, "\n") @@ -388,8 +388,8 @@ func getCPUStat(f *framework.Framework, host string) (usage, uptime float64) { return } -func isHostRunningCgroupV2(f *framework.Framework, host string) bool { - result, err := e2essh.SSH("stat -fc %T /sys/fs/cgroup/", host, framework.TestContext.Provider) +func isHostRunningCgroupV2(ctx 
context.Context, f *framework.Framework, host string) bool { + result, err := e2essh.SSH(ctx, "stat -fc %T /sys/fs/cgroup/", host, framework.TestContext.Provider) framework.ExpectNoError(err) framework.ExpectEqual(result.Code, 0) @@ -398,8 +398,8 @@ func isHostRunningCgroupV2(f *framework.Framework, host string) bool { return strings.Contains(result.Stdout, "cgroup2") || strings.Contains(result.Stdout, "0x63677270") } -func getNpdPodStat(f *framework.Framework, nodeName string) (cpuUsage, rss, workingSet float64) { - summary, err := e2ekubelet.GetStatsSummary(f.ClientSet, nodeName) +func getNpdPodStat(ctx context.Context, f *framework.Framework, nodeName string) (cpuUsage, rss, workingSet float64) { + summary, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, nodeName) framework.ExpectNoError(err) hasNpdPod := false diff --git a/test/e2e/node/pod_gc.go b/test/e2e/node/pod_gc.go index e692e8824ce..58bc0d04d4a 100644 --- a/test/e2e/node/pod_gc.go +++ b/test/e2e/node/pod_gc.go @@ -41,13 +41,13 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" ginkgo.It("should handle the creation of 1000 pods", func(ctx context.Context) { var count int for count < 1000 { - pod, err := createTerminatingPod(f) + pod, err := createTerminatingPod(ctx, f) if err != nil { framework.Failf("err creating pod: %v", err) } pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(ctx, pod, metav1.UpdateOptions{}) if err != nil { framework.Failf("err failing pod: %v", err) } @@ -69,7 +69,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" ginkgo.By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { - pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list pod %v", err) return false, nil @@ -86,7 +86,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" }) }) -func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) { +func createTerminatingPod(ctx context.Context, f *framework.Framework) (*v1.Pod, error) { uuid := uuid.NewUUID() pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -102,5 +102,5 @@ func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) { SchedulerName: "please don't schedule my pods", }, } - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) } diff --git a/test/e2e/node/pods.go b/test/e2e/node/pods.go index 8f281c34764..62728516654 100644 --- a/test/e2e/node/pods.go +++ b/test/e2e/node/pods.go @@ -78,31 +78,31 @@ var _ = SIGDescribe("Pods Extended", func() { ginkgo.By("setting up selector") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(context.TODO(), options) + pods, err := podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(len(pods.Items), 0) ginkgo.By("submitting the pod to kubernetes") - podClient.Create(pod) + 
podClient.Create(ctx, pod) ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(context.TODO(), options) + pods, err = podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(len(pods.Items), 1) // We need to wait for the pod to be running, otherwise the deletion // may be carried out immediately rather than gracefully. - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) // save the running pod - pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to GET scheduled pod") ginkgo.By("deleting the pod gracefully") var lastPod v1.Pod var statusCode int - err = f.ClientSet.CoreV1().RESTClient().Delete().AbsPath("/api/v1/namespaces", pod.Namespace, "pods", pod.Name).Param("gracePeriodSeconds", "30").Do(context.TODO()).StatusCode(&statusCode).Into(&lastPod) + err = f.ClientSet.CoreV1().RESTClient().Delete().AbsPath("/api/v1/namespaces", pod.Namespace, "pods", pod.Name).Param("gracePeriodSeconds", "30").Do(ctx).StatusCode(&statusCode).Into(&lastPod) framework.ExpectNoError(err, "failed to use http client to send delete") framework.ExpectEqual(statusCode, http.StatusOK, "failed to delete gracefully by client request") @@ -113,7 +113,7 @@ var _ = SIGDescribe("Pods Extended", func() { // latency between termination and reportal can be isolated further. start := time.Now() err = wait.Poll(time.Second*5, time.Second*30*3, func() (bool, error) { - podList, err := e2ekubelet.GetKubeletPods(f.ClientSet, pod.Spec.NodeName) + podList, err := e2ekubelet.GetKubeletPods(ctx, f.ClientSet, pod.Spec.NodeName) if err != nil { framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err) return false, nil @@ -140,7 +140,7 @@ var _ = SIGDescribe("Pods Extended", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(context.TODO(), options) + pods, err = podClient.List(ctx, options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 0) @@ -190,10 +190,10 @@ var _ = SIGDescribe("Pods Extended", func() { } ginkgo.By("submitting the pod to kubernetes") - podClient.Create(pod) + podClient.Create(ctx, pod) ginkgo.By("verifying QOS class is set on the pod") - pod, err := podClient.Get(context.TODO(), name, metav1.GetOptions{}) + pod, err := podClient.Get(ctx, name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(pod.Status.QOSClass, v1.PodQOSGuaranteed) }) @@ -207,7 +207,7 @@ var _ = SIGDescribe("Pods Extended", func() { ginkgo.It("should never report success for a pending container", func(ctx context.Context) { ginkgo.By("creating pods that should always exit 1 and terminating the pod after a random delay") - createAndTestPodRepeatedly( + createAndTestPodRepeatedly(ctx, 3, 15, podFastDeleteScenario{client: podClient.PodInterface, delayMs: 2000}, podClient.PodInterface, @@ -215,7 +215,7 @@ var _ = SIGDescribe("Pods Extended", func() { }) ginkgo.It("should never report container 
start when an init container fails", func(ctx context.Context) { ginkgo.By("creating pods with an init container that always exit 1 and terminating the pod after a random delay") - createAndTestPodRepeatedly( + createAndTestPodRepeatedly(ctx, 3, 15, podFastDeleteScenario{client: podClient.PodInterface, delayMs: 2000, initContainer: true}, podClient.PodInterface, @@ -262,13 +262,13 @@ var _ = SIGDescribe("Pods Extended", func() { } ginkgo.By("submitting the pod to kubernetes") - createdPod := podClient.Create(pod) + createdPod := podClient.Create(ctx, pod) ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.By("deleting the pod") return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{}) }) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) var eventList *v1.EventList var err error @@ -281,7 +281,7 @@ var _ = SIGDescribe("Pods Extended", func() { "source": "kubelet", }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - eventList, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), options) + eventList, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, options) if err != nil { return false, err } @@ -327,13 +327,13 @@ var _ = SIGDescribe("Pods Extended", func() { } ginkgo.By("submitting the pod to kubernetes") - podClient.Create(pod) + podClient.Create(ctx, pod) ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.By("deleting the pod") return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{}) }) - err := e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, pod.Name, "Evicted", f.Namespace.Name) + err := e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, pod.Name, "Evicted", f.Namespace.Name) if err != nil { framework.Failf("error waiting for pod to be evicted: %v", err) } @@ -343,7 +343,7 @@ var _ = SIGDescribe("Pods Extended", func() { }) -func createAndTestPodRepeatedly(workers, iterations int, scenario podScenario, podClient v1core.PodInterface) { +func createAndTestPodRepeatedly(ctx context.Context, workers, iterations int, scenario podScenario, podClient v1core.PodInterface) { var ( lock sync.Mutex errs []error @@ -373,7 +373,7 @@ func createAndTestPodRepeatedly(workers, iterations int, scenario podScenario, p // create the pod, capture the change events, then delete the pod start := time.Now() - created, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) + created, err := podClient.Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") ch := make(chan []watch.Event) @@ -381,7 +381,7 @@ func createAndTestPodRepeatedly(workers, iterations int, scenario podScenario, p go func() { defer ginkgo.GinkgoRecover() defer close(ch) - w, err := podClient.Watch(context.TODO(), metav1.ListOptions{ + w, err := podClient.Watch(ctx, metav1.ListOptions{ ResourceVersion: created.ResourceVersion, FieldSelector: fmt.Sprintf("metadata.name=%s", pod.Name), }) @@ -412,7 +412,7 @@ func createAndTestPodRepeatedly(workers, iterations int, scenario podScenario, p case <-waitForWatch: // when the watch is established } - verifier, scenario, err := scenario.Action(pod) + verifier, scenario, err := scenario.Action(ctx, pod) framework.ExpectNoError(err, "failed to take action") var ( @@ -483,7 +483,7 @@ func createAndTestPodRepeatedly(workers, iterations int, scenario podScenario, p type podScenario interface { 
Pod(worker, attempt int) *v1.Pod - Action(*v1.Pod) (podScenarioVerifier, string, error) + Action(context.Context, *v1.Pod) (podScenarioVerifier, string, error) IsLastEvent(event watch.Event) bool } @@ -510,11 +510,11 @@ func (s podFastDeleteScenario) IsLastEvent(event watch.Event) bool { return false } -func (s podFastDeleteScenario) Action(pod *v1.Pod) (podScenarioVerifier, string, error) { +func (s podFastDeleteScenario) Action(ctx context.Context, pod *v1.Pod) (podScenarioVerifier, string, error) { t := time.Duration(rand.Intn(s.delayMs)) * time.Millisecond scenario := fmt.Sprintf("t=%s", t) time.Sleep(t) - return &podStartVerifier{pod: pod}, scenario, s.client.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + return &podStartVerifier{pod: pod}, scenario, s.client.Delete(ctx, pod.Name, metav1.DeleteOptions{}) } func (s podFastDeleteScenario) Pod(worker, attempt int) *v1.Pod { diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index 536588c0d04..bf60d0b4c27 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -43,11 +43,11 @@ type State struct { Received map[string]int } -func testPreStop(c clientset.Interface, ns string) { +func testPreStop(ctx context.Context, c clientset.Interface, ns string) { // This is the server that will receive the preStop notification podDescr := e2epod.NewAgnhostPod(ns, "server", nil, nil, []v1.ContainerPort{{ContainerPort: 8080}}, "nettest") ginkgo.By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) - podDescr, err := c.CoreV1().Pods(ns).Create(context.TODO(), podDescr, metav1.CreateOptions{}) + podDescr, err := c.CoreV1().Pods(ns).Create(ctx, podDescr, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. @@ -57,12 +57,12 @@ func testPreStop(c clientset.Interface, ns string) { }) ginkgo.By("Waiting for pods to come up.") - err = e2epod.WaitForPodRunningInNamespace(c, podDescr) + err = e2epod.WaitForPodRunningInNamespace(ctx, c, podDescr) framework.ExpectNoError(err, "waiting for server pod to start") val := "{\"Source\": \"prestop\"}" - podOut, err := c.CoreV1().Pods(ns).Get(context.TODO(), podDescr.Name, metav1.GetOptions{}) + podOut, err := c.CoreV1().Pods(ns).Get(ctx, podDescr.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "getting pod info") podURL := net.JoinHostPort(podOut.Status.PodIP, "8080") @@ -92,7 +92,7 @@ func testPreStop(c clientset.Interface, ns string) { } ginkgo.By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) - preStopDescr, err = c.CoreV1().Pods(ns).Create(context.TODO(), preStopDescr, metav1.CreateOptions{}) + preStopDescr, err = c.CoreV1().Pods(ns).Create(ctx, preStopDescr, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) deletePreStop := true @@ -105,12 +105,12 @@ func testPreStop(c clientset.Interface, ns string) { return nil }) - err = e2epod.WaitForPodRunningInNamespace(c, preStopDescr) + err = e2epod.WaitForPodRunningInNamespace(ctx, c, preStopDescr) framework.ExpectNoError(err, "waiting for tester pod to start") // Delete the pod with the preStop handler. 
ginkgo.By("Deleting pre-stop pod") - if err := c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, metav1.DeleteOptions{}); err == nil { + if err := c.CoreV1().Pods(ns).Delete(ctx, preStopDescr.Name, metav1.DeleteOptions{}); err == nil { deletePreStop = false } framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) @@ -118,7 +118,7 @@ func testPreStop(c clientset.Interface, ns string) { // Validate that the server received the web poke. err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) { - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) + ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout) defer cancel() var body []byte @@ -167,7 +167,7 @@ var _ = SIGDescribe("PreStop", func() { Description: Create a server pod with a rest endpoint '/write' that changes state.Received field. Create a Pod with a pre-stop handle that posts to the /write endpoint on the server Pod. Verify that the Pod with pre-stop hook is running. Delete the Pod with the pre-stop hook. Before the Pod is deleted, pre-stop handler MUST be called when configured. Verify that the Pod is deleted and a call to prestop hook is verified by checking the status received on the server Pod. */ framework.ConformanceIt("should call prestop when killing a pod ", func(ctx context.Context) { - testPreStop(f.ClientSet, f.Namespace.Name) + testPreStop(ctx, f.ClientSet, f.Namespace.Name) }) ginkgo.It("graceful pod terminated should wait until preStop hook completes the process", func(ctx context.Context) { @@ -177,17 +177,17 @@ var _ = SIGDescribe("PreStop", func() { pod := getPodWithpreStopLifeCycle(name) ginkgo.By("submitting the pod to kubernetes") - podClient.Create(pod) + podClient.Create(ctx, pod) ginkgo.By("waiting for pod running") - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) var err error - pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to GET scheduled pod") ginkgo.By("deleting the pod gracefully") - err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds)) + err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds)) framework.ExpectNoError(err, "failed to delete pod") // wait for less than the gracePeriod termination ensuring the @@ -197,7 +197,7 @@ var _ = SIGDescribe("PreStop", func() { ginkgo.By("verifying the pod is running while in the graceful period termination") result := &v1.PodList{} err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) { - client, err := e2ekubelet.ProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort) + client, err := e2ekubelet.ProxyRequest(ctx, f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort) framework.ExpectNoError(err, "failed to get the pods of the node") err = client.Into(result) framework.ExpectNoError(err, "failed to parse the pods of the node") diff --git a/test/e2e/node/runtimeclass.go b/test/e2e/node/runtimeclass.go index 87a3f6e4103..c46a7329584 100644 --- a/test/e2e/node/runtimeclass.go +++ b/test/e2e/node/runtimeclass.go @@ -53,14 +53,14 @@ var _ = SIGDescribe("RuntimeClass", func() { runtimeClass := newRuntimeClass(f.Namespace.Name, "conflict-runtimeclass") 
runtimeClass.Scheduling = scheduling - rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), runtimeClass, metav1.CreateOptions{}) + rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(ctx, runtimeClass, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") pod := e2enode.NewRuntimeClassPod(rc.GetName()) pod.Spec.NodeSelector = map[string]string{ labelFooName: "bar", } - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("expected 'forbidden' as error, got instead: %v", err) } @@ -70,7 +70,7 @@ var _ = SIGDescribe("RuntimeClass", func() { labelFooName := "foo-" + string(uuid.NewUUID()) labelFizzName := "fizz-" + string(uuid.NewUUID()) - nodeName := scheduling.GetNodeThatCanRunPod(f) + nodeName := scheduling.GetNodeThatCanRunPod(ctx, f) nodeSelector := map[string]string{ labelFooName: "bar", labelFizzName: "buzz", @@ -91,7 +91,7 @@ var _ = SIGDescribe("RuntimeClass", func() { ginkgo.By("Trying to apply a label on the found node.") for key, value := range nodeSelector { e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value) - e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value) + e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, nodeName, key, value) ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, f.ClientSet, nodeName, key) } @@ -101,26 +101,26 @@ var _ = SIGDescribe("RuntimeClass", func() { Value: "bar", Effect: v1.TaintEffectNoSchedule, } - e2enode.AddOrUpdateTaintOnNode(f.ClientSet, nodeName, taint) - e2enode.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint) + e2enode.AddOrUpdateTaintOnNode(ctx, f.ClientSet, nodeName, taint) + e2enode.ExpectNodeHasTaint(ctx, f.ClientSet, nodeName, &taint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, taint) ginkgo.By("Trying to create runtimeclass and pod") runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass") runtimeClass.Scheduling = scheduling - rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), runtimeClass, metav1.CreateOptions{}) + rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(ctx, runtimeClass, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") pod := e2enode.NewRuntimeClassPod(rc.GetName()) pod.Spec.NodeSelector = map[string]string{ labelFooName: "bar", } - pod = e2epod.NewPodClient(f).Create(pod) + pod = e2epod.NewPodClient(f).Create(ctx, pod) - framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name)) + framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, f.Namespace.Name, pod.Name)) // check that pod got scheduled on specified node. 
- scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(nodeName, scheduledPod.Spec.NodeName) framework.ExpectEqual(nodeSelector, pod.Spec.NodeSelector) @@ -135,7 +135,7 @@ var _ = SIGDescribe("RuntimeClass", func() { labelFooName := "foo-" + string(uuid.NewUUID()) labelFizzName := "fizz-" + string(uuid.NewUUID()) - nodeName := scheduling.GetNodeThatCanRunPod(f) + nodeName := scheduling.GetNodeThatCanRunPod(ctx, f) nodeSelector := map[string]string{ labelFooName: "bar", labelFizzName: "buzz", @@ -147,26 +147,26 @@ var _ = SIGDescribe("RuntimeClass", func() { ginkgo.By("Trying to apply a label on the found node.") for key, value := range nodeSelector { e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value) - e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value) + e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, nodeName, key, value) ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, f.ClientSet, nodeName, key) } ginkgo.By("Trying to create runtimeclass and pod") runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass") runtimeClass.Scheduling = scheduling - rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), runtimeClass, metav1.CreateOptions{}) + rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(ctx, runtimeClass, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") pod := e2enode.NewRuntimeClassPod(rc.GetName()) pod.Spec.NodeSelector = map[string]string{ labelFooName: "bar", } - pod = e2epod.NewPodClient(f).Create(pod) + pod = e2epod.NewPodClient(f).Create(ctx, pod) - framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name)) + framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, f.Namespace.Name, pod.Name)) // check that pod got scheduled on specified node. 
- scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(nodeName, scheduledPod.Spec.NodeName) framework.ExpectEqual(nodeSelector, pod.Spec.NodeSelector) diff --git a/test/e2e/node/security_context.go b/test/e2e/node/security_context.go index 6072c40e0cc..87b7dcc163c 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -74,7 +74,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.Containers[0].Command = []string{"id", "-G"} pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678} groups := []string{"1234", "5678"} - e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups) + e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups) }) ginkgo.When("if the container's primary UID belongs to some groups in the image [LinuxOnly]", func() { @@ -100,6 +100,7 @@ var _ = SIGDescribe("Security Context", func() { // $ id -G // 1000 50000 60000 e2eoutput.TestContainerOutput( + ctx, f, "pod.Spec.SecurityContext.SupplementalGroups with pre-defined-group in the image", pod, 0, @@ -114,7 +115,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.SecurityContext.RunAsUser = &userID pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"} - e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("uid=%v", userID), fmt.Sprintf("gid=%v", 0), }) @@ -134,7 +135,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.SecurityContext.RunAsGroup = &groupID pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"} - e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("uid=%v", userID), fmt.Sprintf("gid=%v", groupID), }) @@ -149,7 +150,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUserID pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"} - e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("uid=%v", overrideUserID), fmt.Sprintf("gid=%v", 0), }) @@ -174,22 +175,22 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.Containers[0].SecurityContext.RunAsGroup = &overrideGroupID pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"} - e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("uid=%v", overrideUserID), fmt.Sprintf("gid=%v", overrideGroupID), }) }) ginkgo.It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func(ctx context.Context) { - testPodSELinuxLabeling(f, false, false) + testPodSELinuxLabeling(ctx, f, false, false) }) ginkgo.It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func(ctx context.Context) { - testPodSELinuxLabeling(f, true, false) + testPodSELinuxLabeling(ctx, f, true, false) }) ginkgo.It("should support volume SELinux 
relabeling when using hostPID [Flaky] [LinuxOnly]", func(ctx context.Context) { - testPodSELinuxLabeling(f, false, true) + testPodSELinuxLabeling(ctx, f, false, true) }) ginkgo.It("should support seccomp unconfined on the container [LinuxOnly]", func(ctx context.Context) { @@ -197,31 +198,31 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}} pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} - e2eoutput.TestContainerOutput(f, "seccomp unconfined container", pod, 0, []string{"0"}) // seccomp disabled + e2eoutput.TestContainerOutput(ctx, f, "seccomp unconfined container", pod, 0, []string{"0"}) // seccomp disabled }) ginkgo.It("should support seccomp unconfined on the pod [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} - e2eoutput.TestContainerOutput(f, "seccomp unconfined pod", pod, 0, []string{"0"}) // seccomp disabled + e2eoutput.TestContainerOutput(ctx, f, "seccomp unconfined pod", pod, 0, []string{"0"}) // seccomp disabled }) ginkgo.It("should support seccomp runtime/default [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} - e2eoutput.TestContainerOutput(f, "seccomp runtime/default", pod, 0, []string{"2"}) // seccomp filtered + e2eoutput.TestContainerOutput(ctx, f, "seccomp runtime/default", pod, 0, []string{"2"}) // seccomp filtered }) ginkgo.It("should support seccomp default which is unconfined [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} - e2eoutput.TestContainerOutput(f, "seccomp default unconfined", pod, 0, []string{"0"}) // seccomp disabled + e2eoutput.TestContainerOutput(ctx, f, "seccomp default unconfined", pod, 0, []string{"0"}) // seccomp disabled }) }) -func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) { +func testPodSELinuxLabeling(ctx context.Context, f *framework.Framework, hostIPC bool, hostPID bool) { // Write and read a file with an empty_dir volume // with a pod with the MCS label s0:c0,c1 pod := scTestPod(hostIPC, hostPID) @@ -249,10 +250,10 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod.Spec.Containers[0].Command = []string{"sleep", "6000"} client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - pod, err := client.Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := client.Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pod %v", pod) - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)) testContent := "hello" testFilePath := mountPath + "/TEST" @@ -263,7 +264,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) framework.ExpectNoError(err) 
gomega.Expect(content).To(gomega.ContainSubstring(testContent)) - foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Confirm that the file can be accessed from a second @@ -294,7 +295,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{ Level: "s0:c0,c1", } - e2eoutput.TestContainerOutput(f, "Pod with same MCS label reading test file", pod, 0, []string{testContent}) + e2eoutput.TestContainerOutput(ctx, f, "Pod with same MCS label reading test file", pod, 0, []string{testContent}) // Confirm that the same pod with a different MCS // label cannot access the volume @@ -306,10 +307,10 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{ Level: "s0:c2,c3", } - _, err = client.Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = client.Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pod %v", pod) - err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "Error waiting for pod to run %v", pod) // for this to work, SELinux should be in enforcing mode, so let's check that diff --git a/test/e2e/node/ssh.go b/test/e2e/node/ssh.go index 7112172ed90..14d0e6820e5 100644 --- a/test/e2e/node/ssh.go +++ b/test/e2e/node/ssh.go @@ -48,7 +48,7 @@ var _ = SIGDescribe("SSH", func() { ginkgo.It("should SSH to all nodes and run commands", func(ctx context.Context) { // Get all nodes' external IPs. ginkgo.By("Getting all nodes' SSH-able IP addresses") - hosts, err := e2essh.NodeSSHHosts(f.ClientSet) + hosts, err := e2essh.NodeSSHHosts(ctx, f.ClientSet) if err != nil { framework.Failf("Error getting node hostnames: %v", err) } @@ -85,7 +85,7 @@ var _ = SIGDescribe("SSH", func() { for _, host := range testhosts { ginkgo.By(fmt.Sprintf("SSH'ing host %s", host)) - result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, testCase.cmd, host, framework.TestContext.Provider) stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr) if err != testCase.expectedError { framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError) @@ -111,7 +111,7 @@ var _ = SIGDescribe("SSH", func() { // Quickly test that SSH itself errors correctly. ginkgo.By("SSH'ing to a nonexistent host") - if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil { + if _, err = e2essh.SSH(ctx, `echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil { framework.Failf("Expected error trying to SSH to nonexistent host.") } }) diff --git a/test/e2e/node/taints.go b/test/e2e/node/taints.go index 5308e5fb19e..325e7c9f933 100644 --- a/test/e2e/node/taints.go +++ b/test/e2e/node/taints.go @@ -122,17 +122,17 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, // Creates and starts a controller (informer) that watches updates on a pod in given namespace with given name. It puts a new // struct into observedDeletion channel for every deletion it sees. 
-func createTestController(cs clientset.Interface, observedDeletions chan string, stopCh chan struct{}, podLabel, ns string) { +func createTestController(ctx context.Context, cs clientset.Interface, observedDeletions chan string, stopCh chan struct{}, podLabel, ns string) { _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"group": podLabel}).String() - obj, err := cs.CoreV1().Pods(ns).List(context.TODO(), options) + obj, err := cs.CoreV1().Pods(ns).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"group": podLabel}).String() - return cs.CoreV1().Pods(ns).Watch(context.TODO(), options) + return cs.CoreV1().Pods(ns).Watch(ctx, options) }, }, &v1.Pod{}, @@ -167,13 +167,13 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { f := framework.NewDefaultFramework("taint-single-pod") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = f.ClientSet ns = f.Namespace.Name - e2enode.WaitForTotalHealthy(cs, time.Minute) + e2enode.WaitForTotalHealthy(ctx, cs, time.Minute) - err := framework.CheckTestingNSDeletedExcept(cs, ns) + err := framework.CheckTestingNSDeletedExcept(ctx, cs, ns) framework.ExpectNoError(err) }) @@ -185,17 +185,17 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { pod := createPodForTaintsTest(false, 0, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) - createTestController(cs, observedDeletions, stopCh, podName, ns) + createTestController(ctx, cs, observedDeletions, stopCh, podName, ns) ginkgo.By("Starting pod...") - nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) + nodeName, err := testutils.RunPodAndGetNodeName(ctx, cs, pod, 2*time.Minute) framework.ExpectNoError(err) framework.Logf("Pod is running on %v. Tainting Node", nodeName) ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint) // Wait a bit @@ -217,17 +217,17 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { pod := createPodForTaintsTest(true, 0, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) - createTestController(cs, observedDeletions, stopCh, podName, ns) + createTestController(ctx, cs, observedDeletions, stopCh, podName, ns) ginkgo.By("Starting pod...") - nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) + nodeName, err := testutils.RunPodAndGetNodeName(ctx, cs, pod, 2*time.Minute) framework.ExpectNoError(err) framework.Logf("Pod is running on %v. 
Tainting Node", nodeName) ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint) // Wait a bit @@ -250,17 +250,17 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { pod := createPodForTaintsTest(true, kubeletPodDeletionDelaySeconds+2*additionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) - createTestController(cs, observedDeletions, stopCh, podName, ns) + createTestController(ctx, cs, observedDeletions, stopCh, podName, ns) ginkgo.By("Starting pod...") - nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) + nodeName, err := testutils.RunPodAndGetNodeName(ctx, cs, pod, 2*time.Minute) framework.ExpectNoError(err) framework.Logf("Pod is running on %v. Tainting Node", nodeName) ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint) // Wait a bit @@ -295,23 +295,23 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { pod := createPodForTaintsTest(true, 2*additionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) - createTestController(cs, observedDeletions, stopCh, podName, ns) + createTestController(ctx, cs, observedDeletions, stopCh, podName, ns) // 1. Run a pod with short toleration ginkgo.By("Starting pod...") - nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) + nodeName, err := testutils.RunPodAndGetNodeName(ctx, cs, pod, 2*time.Minute) framework.ExpectNoError(err) framework.Logf("Pod is running on %v. Tainting Node", nodeName) // 2. Taint the node running this pod with a no-execute taint ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) taintRemoved := false ginkgo.DeferCleanup(func(ctx context.Context) { if !taintRemoved { - e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) + e2enode.RemoveTaintOffNode(ctx, cs, nodeName, testTaint) } }) @@ -328,7 +328,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 4. Remove the taint framework.Logf("Removing taint from Node") - e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) + e2enode.RemoveTaintOffNode(ctx, cs, nodeName, testTaint) taintRemoved = true // 5. See if Pod won't be evicted. @@ -352,7 +352,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { pod.Finalizers = append(pod.Finalizers, testFinalizer) ginkgo.By("Starting pod...") - nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) + nodeName, err := testutils.RunPodAndGetNodeName(ctx, cs, pod, 2*time.Minute) framework.ExpectNoError(err) framework.Logf("Pod is running on %v. 
Tainting Node", nodeName) @@ -360,17 +360,17 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint) ginkgo.By("Waiting for Pod to be terminating") timeout := time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second - err = e2epod.WaitForPodTerminatingInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, timeout) + err = e2epod.WaitForPodTerminatingInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, timeout) framework.ExpectNoError(err) ginkgo.By("Verifying the pod has the pod disruption condition") - e2epod.VerifyPodHasConditionWithType(f, pod, v1.DisruptionTarget) + e2epod.VerifyPodHasConditionWithType(ctx, f, pod, v1.DisruptionTarget) }) }) @@ -380,13 +380,13 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { f := framework.NewDefaultFramework("taint-multiple-pods") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = f.ClientSet ns = f.Namespace.Name - e2enode.WaitForTotalHealthy(cs, time.Minute) + e2enode.WaitForTotalHealthy(ctx, cs, time.Minute) - err := framework.CheckTestingNSDeletedExcept(cs, ns) + err := framework.CheckTestingNSDeletedExcept(ctx, cs, ns) framework.ExpectNoError(err) }) @@ -397,27 +397,27 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { podGroup := "taint-eviction-a" observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) - createTestController(cs, observedDeletions, stopCh, podGroup, ns) + createTestController(ctx, cs, observedDeletions, stopCh, podGroup, ns) pod1 := createPodForTaintsTest(false, 0, podGroup+"1", podGroup, ns) pod2 := createPodForTaintsTest(true, 0, podGroup+"2", podGroup, ns) ginkgo.By("Starting pods...") - nodeName1, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute) + nodeName1, err := testutils.RunPodAndGetNodeName(ctx, cs, pod1, 2*time.Minute) framework.ExpectNoError(err) framework.Logf("Pod1 is running on %v. Tainting Node", nodeName1) - nodeName2, err := testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute) + nodeName2, err := testutils.RunPodAndGetNodeName(ctx, cs, pod2, 2*time.Minute) framework.ExpectNoError(err) framework.Logf("Pod2 is running on %v. 
Tainting Node", nodeName2) ginkgo.By("Trying to apply a taint on the Nodes") testTaint := getTestTaint() - e2enode.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName1, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName1, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName1, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName1, testTaint) if nodeName2 != nodeName1 { - e2enode.AddOrUpdateTaintOnNode(cs, nodeName2, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName2, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName2, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName2, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName2, testTaint) } @@ -456,16 +456,16 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { podGroup := "taint-eviction-b" observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) - createTestController(cs, observedDeletions, stopCh, podGroup, ns) + createTestController(ctx, cs, observedDeletions, stopCh, podGroup, ns) // 1. Run two pods both with toleration; one with tolerationSeconds=5, the other with 25 pod1 := createPodForTaintsTest(true, additionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns) pod2 := createPodForTaintsTest(true, 5*additionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns) ginkgo.By("Starting pods...") - nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute) + nodeName, err := testutils.RunPodAndGetNodeName(ctx, cs, pod1, 2*time.Minute) framework.ExpectNoError(err) - node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"] if !ok { @@ -475,18 +475,18 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { framework.Logf("Pod1 is running on %v. Tainting Node", nodeName) // ensure pod2 lands on the same node as pod1 pod2.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": nodeHostNameLabel} - _, err = testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute) + _, err = testutils.RunPodAndGetNodeName(ctx, cs, pod2, 2*time.Minute) framework.ExpectNoError(err) // Wait for pods to be running state before eviction happens - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod1)) - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod2)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod1)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod2)) framework.Logf("Pod2 is running on %v. Tainting Node", nodeName) // 2. Taint the nodes running those pods with a no-execute taint ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint) // 3. 
Wait to see if both pods get evicted in between [5, 25] seconds diff --git a/test/e2e/scheduling/events.go b/test/e2e/scheduling/events.go index 27cb1a8012d..a6df5ea0750 100644 --- a/test/e2e/scheduling/events.go +++ b/test/e2e/scheduling/events.go @@ -53,11 +53,12 @@ func scheduleFailureEvent(podName string) func(*v1.Event) bool { } // Action is a function to be performed by the system. -type Action func() error +type Action func(ctx context.Context) error // observeEventAfterAction returns true if an event matching the predicate was emitted // from the system after performing the supplied action. -func observeEventAfterAction(c clientset.Interface, ns string, eventPredicate func(*v1.Event) bool, action Action) (bool, error) { +func observeEventAfterAction(ctx context.Context, c clientset.Interface, ns string, eventPredicate func(*v1.Event) bool, action Action) (bool, error) { + // TODO (pohly): add context support observedMatchingEvent := false informerStartedChan := make(chan struct{}) var informerStartedGuard sync.Once @@ -66,13 +67,13 @@ func observeEventAfterAction(c clientset.Interface, ns string, eventPredicate fu _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - ls, err := c.CoreV1().Events(ns).List(context.TODO(), options) + ls, err := c.CoreV1().Events(ns).List(ctx, options) return ls, err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { // Signal parent goroutine that watching has begun. defer informerStartedGuard.Do(func() { close(informerStartedChan) }) - w, err := c.CoreV1().Events(ns).Watch(context.TODO(), options) + w, err := c.CoreV1().Events(ns).Watch(ctx, options) return w, err }, }, @@ -99,7 +100,7 @@ func observeEventAfterAction(c clientset.Interface, ns string, eventPredicate fu <-informerStartedChan // Invoke the action function. - err := action() + err := action(ctx) if err != nil { return false, err } @@ -108,7 +109,7 @@ func observeEventAfterAction(c clientset.Interface, ns string, eventPredicate fu // Wait up 2 minutes polling every second. 
timeout := 2 * time.Minute interval := 1 * time.Second - err = wait.Poll(interval, timeout, func() (bool, error) { + err = wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { return observedMatchingEvent, nil }) return err == nil, err diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index 807de255e0e..8cdf6f158a2 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -75,19 +75,19 @@ var _ = SIGDescribe("LimitRange", func() { selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(context.TODO(), options) + limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, options) framework.ExpectNoError(err, "failed to query for limitRanges") framework.ExpectEqual(len(limitRanges.Items), 0) lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = selector.String() - limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(context.TODO(), options) + limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, options) return limitRanges, err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = selector.String() - return f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(ctx, options) }, } _, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.LimitRange{}) @@ -100,7 +100,7 @@ var _ = SIGDescribe("LimitRange", func() { } ginkgo.By("Submitting a LimitRange") - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(context.TODO(), limitRange, metav1.CreateOptions{}) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(ctx, limitRange, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Verifying LimitRange creation was observed") @@ -114,7 +114,7 @@ var _ = SIGDescribe("LimitRange", func() { } ginkgo.By("Fetching the LimitRange to ensure it has proper values") - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(context.TODO(), limitRange.Name, metav1.GetOptions{}) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(ctx, limitRange.Name, metav1.GetOptions{}) framework.ExpectNoError(err) expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit} actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default} @@ -123,11 +123,11 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with no resource requirements") pod := newTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for i := range pod.Spec.Containers 
{ err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) @@ -140,11 +140,11 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with partial resource requirements") pod = newTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", "")) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // This is an interesting case, so it's worth a comment // If you specify a Limit, and no Request, the Limit will default to the Request @@ -161,23 +161,23 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Failing to create a Pod with less than min resources") pod = newTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Failing to create a Pod with more than max resources") pod = newTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectError(err) ginkgo.By("Updating a LimitRange") newMin := getResourceList("9m", "49Mi", "49Gi") limitRange.Spec.Limits[0].Min = newMin - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(context.TODO(), limitRange, metav1.UpdateOptions{}) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(ctx, limitRange, metav1.UpdateOptions{}) framework.ExpectNoError(err) ginkgo.By("Verifying LimitRange updating is effective") err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) { - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(context.TODO(), limitRange.Name, metav1.GetOptions{}) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(ctx, limitRange.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil }) @@ -185,21 +185,21 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with less than former min resources") pod = newTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Failing to create a Pod with more than max resources") pod = newTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectError(err) 
ginkgo.By("Deleting a LimitRange") - err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(context.TODO(), limitRange.Name, *metav1.NewDeleteOptions(30)) + err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(ctx, limitRange.Name, *metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) ginkgo.By("Verifying the LimitRange was deleted") err = wait.Poll(time.Second*5, e2eservice.RespondingTimeout, func() (bool, error) { - limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Unable to retrieve LimitRanges: %v", err) @@ -222,7 +222,7 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with more than former max resources") pod = newTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) }) @@ -283,7 +283,7 @@ var _ = SIGDescribe("LimitRange", func() { framework.ExpectNoError(err, "Failed to create limitRange %q", lrName) ginkgo.By("Creating another limitRange in another namespace") - lrNamespace, err := f.CreateNamespace(lrName, nil) + lrNamespace, err := f.CreateNamespace(ctx, lrName, nil) framework.ExpectNoError(err, "failed creating Namespace") framework.Logf("Namespace %q created", lrNamespace.ObjectMeta.Name) framework.Logf(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, lrNamespace.Name)) @@ -326,7 +326,7 @@ var _ = SIGDescribe("LimitRange", func() { framework.ExpectNoError(err, "failed to delete the LimitRange by Collection") ginkgo.By(fmt.Sprintf("Confirm that the limitRange %q has been deleted", lrName)) - err = wait.PollImmediate(1*time.Second, 10*time.Second, checkLimitRangeListQuantity(f, patchedLabelSelector, 0)) + err = wait.PollImmediateWithContext(ctx, 1*time.Second, 10*time.Second, checkLimitRangeListQuantity(f, patchedLabelSelector, 0)) framework.ExpectNoError(err, "failed to count the required limitRanges") framework.Logf("LimitRange %q has been deleted.", lrName) @@ -425,11 +425,11 @@ func newTestPod(name string, requests v1.ResourceList, limits v1.ResourceList) * } } -func checkLimitRangeListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) { - return func() (bool, error) { +func checkLimitRangeListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) { + return func(ctx context.Context) (bool, error) { framework.Logf("Requesting list of LimitRange to confirm quantity") - list, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: label}) + list, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, metav1.ListOptions{LabelSelector: label}) if err != nil { return false, err } diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index d9c0a2040c2..13f3b66d100 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -89,17 +89,17 @@ func makeCudaAdditionDevicePluginTestPod() *v1.Pod { return testPod } -func logOSImages(f *framework.Framework) { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func logOSImages(ctx 
context.Context, f *framework.Framework) { + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") for _, node := range nodeList.Items { framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage) } } -func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool { +func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, f *framework.Framework) bool { framework.Logf("Getting list of Nodes from API server") - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") for _, node := range nodeList.Items { if node.Spec.Unschedulable { @@ -115,8 +115,8 @@ func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool { return true } -func getGPUsAvailable(f *framework.Framework) int64 { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func getGPUsAvailable(ctx context.Context, f *framework.Framework) int64 { + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") var gpusAvailable int64 for _, node := range nodeList.Items { @@ -128,8 +128,8 @@ func getGPUsAvailable(f *framework.Framework) int64 { } // SetupNVIDIAGPUNode install Nvidia Drivers and wait for Nvidia GPUs to be available on nodes -func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *e2edebug.ContainerResourceGatherer { - logOSImages(f) +func SetupNVIDIAGPUNode(ctx context.Context, f *framework.Framework, setupResourceGatherer bool) *e2edebug.ContainerResourceGatherer { + logOSImages(ctx, f) var err error var ds *appsv1.DaemonSet @@ -137,7 +137,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *e2e if dsYamlURLFromEnv != "" { // Using DaemonSet from remote URL framework.Logf("Using remote nvidia-driver-installer daemonset manifest from %v", dsYamlURLFromEnv) - ds, err = e2emanifest.DaemonSetFromURL(dsYamlURLFromEnv) + ds, err = e2emanifest.DaemonSetFromURL(ctx, dsYamlURLFromEnv) framework.ExpectNoError(err, "failed get remote") } else { // Using default local DaemonSet @@ -149,14 +149,14 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *e2e } gpuResourceName = e2egpu.NVIDIAGPUResourceName ds.Namespace = f.Namespace.Name - _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") framework.Logf("Successfully created daemonset to install Nvidia drivers.") - pods, err := e2eresource.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet")) + pods, err := e2eresource.WaitForControlledPods(ctx, f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet")) framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset") - devicepluginPods, err := e2eresource.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet")) + devicepluginPods, err := e2eresource.WaitForControlledPods(ctx, f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet")) if err == 
nil { framework.Logf("Adding deviceplugin addon pod.") pods.Items = append(pods.Items, devicepluginPods.Items...) @@ -165,15 +165,15 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *e2e var rsgather *e2edebug.ContainerResourceGatherer if setupResourceGatherer { framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.") - rsgather, err = e2edebug.NewResourceUsageGatherer(f.ClientSet, e2edebug.ResourceGathererOptions{InKubemark: false, Nodes: e2edebug.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods) + rsgather, err = e2edebug.NewResourceUsageGatherer(ctx, f.ClientSet, e2edebug.ResourceGathererOptions{InKubemark: false, Nodes: e2edebug.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods) framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods") - go rsgather.StartGatheringData() + go rsgather.StartGatheringData(ctx) } // Wait for Nvidia GPUs to be available on nodes framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...") - gomega.Eventually(func() bool { - return areGPUsAvailableOnAllSchedulableNodes(f) + gomega.Eventually(ctx, func(ctx context.Context) bool { + return areGPUsAvailableOnAllSchedulableNodes(ctx, f) }, driverInstallTimeout, time.Second).Should(gomega.BeTrue()) return rsgather @@ -190,19 +190,19 @@ func getGPUsPerPod() int64 { return gpusPerPod } -func testNvidiaGPUs(f *framework.Framework) { - rsgather := SetupNVIDIAGPUNode(f, true) - gpuPodNum := getGPUsAvailable(f) / getGPUsPerPod() +func testNvidiaGPUs(ctx context.Context, f *framework.Framework) { + rsgather := SetupNVIDIAGPUNode(ctx, f, true) + gpuPodNum := getGPUsAvailable(ctx, f) / getGPUsPerPod() framework.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum) podList := []*v1.Pod{} for i := int64(0); i < gpuPodNum; i++ { - podList = append(podList, e2epod.NewPodClient(f).Create(makeCudaAdditionDevicePluginTestPod())) + podList = append(podList, e2epod.NewPodClient(f).Create(ctx, makeCudaAdditionDevicePluginTestPod())) } framework.Logf("Wait for all test pods to succeed") // Wait for all pods to succeed for _, pod := range podList { - e2epod.NewPodClient(f).WaitForSuccess(pod.Name, 5*time.Minute) - logContainers(f, pod) + e2epod.NewPodClient(f).WaitForSuccess(ctx, pod.Name, 5*time.Minute) + logContainers(ctx, f, pod) } framework.Logf("Stopping ResourceUsageGather") @@ -213,9 +213,9 @@ func testNvidiaGPUs(f *framework.Framework) { framework.ExpectNoError(err, "getting resource usage summary") } -func logContainers(f *framework.Framework, pod *v1.Pod) { +func logContainers(ctx context.Context, f *framework.Framework, pod *v1.Pod) { for _, container := range pod.Spec.Containers { - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, container.Name) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, container.Name) framework.ExpectNoError(err, "Should be able to get container logs for container: %s", container.Name) framework.Logf("Got container logs for %s:\n%v", container.Name, logs) } @@ -225,27 +225,27 @@ var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() { f := framework.NewDefaultFramework("device-plugin-gpus") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("run Nvidia GPU Device Plugin tests", func(ctx context.Context) { - testNvidiaGPUs(f) + testNvidiaGPUs(ctx, f) }) }) 
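The nvidia-gpus.go changes above lean on two mechanisms used throughout this patch: Ginkgo v2 hands each spec body a context.Context, and Gomega's Eventually accepts that context plus a context-aware poll function, so waiting for the driver DaemonSet is abandoned the moment the spec is interrupted. A compile-level sketch of the combination, with newClient() and the node-listing condition standing in for the e2e framework's clientset and areGPUsAvailableOnAllSchedulableNodes rather than reproducing them:

// Sketch only: a spec that receives ctx from Ginkgo and threads it through Gomega's Eventually.
package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// newClient is assumed to exist for this sketch; the e2e framework provides f.ClientSet instead.
func newClient() kubernetes.Interface { panic("illustrative only") }

var _ = ginkgo.Describe("gpu-driver-install (sketch)", func() {
	ginkgo.It("waits until nodes report capacity", func(ctx context.Context) {
		client := newClient()
		// Eventually gets the spec context first; polling stops as soon as ctx is cancelled,
		// e.g. when the suite is interrupted or the spec times out.
		gomega.Eventually(ctx, func(ctx context.Context) bool {
			nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
			return err == nil && len(nodes.Items) > 0
		}, 5*time.Minute, time.Second).Should(gomega.BeTrue())
	})
})

The same motivation drives go rsgather.StartGatheringData(ctx) above: the background gatherer can observe cancellation instead of outliving an aborted spec.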
-func testNvidiaGPUsJob(f *framework.Framework) { - _ = SetupNVIDIAGPUNode(f, false) +func testNvidiaGPUsJob(ctx context.Context, f *framework.Framework) { + _ = SetupNVIDIAGPUNode(ctx, f, false) // Job set to have 5 completions with parallelism of 1 to ensure that it lasts long enough to experience the node recreation completions := int32(5) ginkgo.By("Starting GPU job") - StartJob(f, completions) + StartJob(ctx, f, completions) - job, err := e2ejob.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add") + job, err := e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, "cuda-add") framework.ExpectNoError(err) // make sure job is running by waiting for its first pod to start running - err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1) + err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, 1) framework.ExpectNoError(err) - numNodes, err := e2enode.TotalRegistered(f.ClientSet) + numNodes, err := e2enode.TotalRegistered(ctx, f.ClientSet) framework.ExpectNoError(err) - nodes, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout) + nodes, err := e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.NodeReadyInitialTimeout) framework.ExpectNoError(err) ginkgo.By("Recreating nodes") @@ -254,17 +254,17 @@ func testNvidiaGPUsJob(f *framework.Framework) { ginkgo.By("Done recreating nodes") ginkgo.By("Waiting for gpu job to finish") - err = e2ejob.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name) + err = e2ejob.WaitForJobFinish(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err) ginkgo.By("Done with gpu job") gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during node recreation: %v", job.Status.Failed) - VerifyJobNCompletions(f, completions) + VerifyJobNCompletions(ctx, f, completions) } // StartJob starts a simple CUDA job that requests gpu and the specified number of completions -func StartJob(f *framework.Framework, completions int32) { +func StartJob(ctx context.Context, f *framework.Framework, completions int32) { var activeSeconds int64 = 3600 testJob := e2ejob.NewTestJob("succeed", "cuda-add", v1.RestartPolicyAlways, 1, completions, &activeSeconds, 6) testJob.Spec.Template.Spec = v1.PodSpec{ @@ -283,15 +283,15 @@ func StartJob(f *framework.Framework, completions int32) { }, } ns := f.Namespace.Name - _, err := e2ejob.CreateJob(f.ClientSet, ns, testJob) + _, err := e2ejob.CreateJob(ctx, f.ClientSet, ns, testJob) framework.ExpectNoError(err) framework.Logf("Created job %v", testJob) } // VerifyJobNCompletions verifies that the job has completions number of successful pods -func VerifyJobNCompletions(f *framework.Framework, completions int32) { +func VerifyJobNCompletions(ctx context.Context, f *framework.Framework, completions int32) { ns := f.Namespace.Name - pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add") + pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, "cuda-add") framework.ExpectNoError(err) createdPods := pods.Items createdPodNames := podNames(createdPods) @@ -300,8 +300,8 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) { successes := int32(0) regex := regexp.MustCompile("PASSED") for _, podName := range createdPodNames { - e2epod.NewPodClient(f).WaitForFinish(podName, 5*time.Minute) - logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podName, "vector-addition") + e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 5*time.Minute) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, 
podName, "vector-addition") framework.ExpectNoError(err, "Should be able to get logs for pod %v", podName) if regex.MatchString(logs) { successes++ @@ -327,6 +327,6 @@ var _ = SIGDescribe("GPUDevicePluginAcrossRecreate [Feature:Recreate]", func() { f := framework.NewDefaultFramework("device-plugin-gpus-recreate") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("run Nvidia GPU Device Plugin tests with a recreation", func(ctx context.Context) { - testNvidiaGPUsJob(f) + testNvidiaGPUsJob(ctx, f) }) }) diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index b65fc4e2726..0079a576ebf 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -85,24 +85,24 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { f := framework.NewDefaultFramework("sched-pred") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.AfterEach(func() { - rc, err := cs.CoreV1().ReplicationControllers(ns).Get(context.TODO(), RCName, metav1.GetOptions{}) + ginkgo.AfterEach(func(ctx context.Context) { + rc, err := cs.CoreV1().ReplicationControllers(ns).Get(ctx, RCName, metav1.GetOptions{}) if err == nil && *(rc.Spec.Replicas) != 0 { ginkgo.By("Cleaning up the replication controller") - err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName) + err := e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, ns, RCName) framework.ExpectNoError(err) } }) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = f.ClientSet ns = f.Namespace.Name nodeList = &v1.NodeList{} var err error - e2enode.AllNodesReady(cs, time.Minute) + e2enode.AllNodesReady(ctx, cs, time.Minute) - nodeList, err = e2enode.GetReadySchedulableNodes(cs) + nodeList, err = e2enode.GetReadySchedulableNodes(ctx, cs) if err != nil { framework.Logf("Unexpected error occurred: %v", err) } @@ -111,12 +111,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { workerNodes.Insert(n.Name) } - err = framework.CheckTestingNSDeletedExcept(cs, ns) + err = framework.CheckTestingNSDeletedExcept(ctx, cs, ns) framework.ExpectNoError(err) for _, node := range nodeList.Items { framework.Logf("\nLogging pods the apiserver thinks is on node %v before test", node.Name) - printAllPodsOnNode(cs, node.Name) + printAllPodsOnNode(ctx, cs, node.Name) } }) @@ -143,7 +143,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } WaitForStableCluster(cs, workerNodes) - pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) + pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, pod := range pods.Items { _, found := nodeToAllocatableMap[pod.Spec.NodeName] @@ -196,8 +196,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }, }, } - WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false) - verifyResult(cs, podsNeededForSaturation, 1, ns) + WaitForSchedulerAfterAction(ctx, f, createPausePodAction(f, conf), ns, podName, false) + verifyResult(ctx, cs, podsNeededForSaturation, 1, ns) }) // This test verifies we don't allow scheduling of pods in a way that sum of limits + @@ -218,15 +218,15 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { var handler string var beardsecond v1.ResourceName = "example.com/beardsecond" - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { WaitForStableCluster(cs, workerNodes) ginkgo.By("Add RuntimeClass and fake resource") // find a 
node which can run a pod: - testNodeName = GetNodeThatCanRunPod(f) + testNodeName = GetNodeThatCanRunPod(ctx, f) // Get node object: - node, err := cs.CoreV1().Nodes().Get(context.TODO(), testNodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(ctx, testNodeName, metav1.GetOptions{}) framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName) // update Node API object with a fake resource @@ -234,7 +234,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { nodeCopy.ResourceVersion = "0" nodeCopy.Status.Capacity[beardsecond] = resource.MustParse("1000") - _, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy, metav1.UpdateOptions{}) + _, err = cs.CoreV1().Nodes().UpdateStatus(ctx, nodeCopy, metav1.UpdateOptions{}) framework.ExpectNoError(err, "unable to apply fake resource to %v", testNodeName) // Register a runtimeClass with overhead set as 25% of the available beard-seconds @@ -249,28 +249,28 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }, }, } - _, err = cs.NodeV1().RuntimeClasses().Create(context.TODO(), rc, metav1.CreateOptions{}) + _, err = cs.NodeV1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Remove fake resource and RuntimeClass") // remove fake resource: if testNodeName != "" { // Get node object: - node, err := cs.CoreV1().Nodes().Get(context.TODO(), testNodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(ctx, testNodeName, metav1.GetOptions{}) framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName) nodeCopy := node.DeepCopy() // force it to update nodeCopy.ResourceVersion = "0" delete(nodeCopy.Status.Capacity, beardsecond) - _, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy, metav1.UpdateOptions{}) + _, err = cs.CoreV1().Nodes().UpdateStatus(ctx, nodeCopy, metav1.UpdateOptions{}) framework.ExpectNoError(err, "unable to update node %v", testNodeName) } // remove RuntimeClass - cs.NodeV1beta1().RuntimeClasses().Delete(context.TODO(), e2enode.PreconfiguredRuntimeClassHandler, metav1.DeleteOptions{}) + _ = cs.NodeV1beta1().RuntimeClasses().Delete(ctx, e2enode.PreconfiguredRuntimeClassHandler, metav1.DeleteOptions{}) }) ginkgo.It("verify pod overhead is accounted for", func(ctx context.Context) { @@ -281,7 +281,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { ginkgo.By("Starting Pod to consume most of the node's resource.") // Create pod which requires 70% of the available beard-seconds. - fillerPod := createPausePod(f, pausePodConfig{ + fillerPod := createPausePod(ctx, f, pausePodConfig{ Name: "filler-pod-" + string(uuid.NewUUID()), Resources: &v1.ResourceRequirements{ Requests: v1.ResourceList{beardsecond: resource.MustParse("700")}, @@ -290,7 +290,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }) // Wait for filler pod to schedule. 
- framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, fillerPod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, fillerPod)) ginkgo.By("Creating another pod that requires unavailable amount of resources.") // Create another pod that requires 20% of available beard-seconds, but utilizes the RuntimeClass @@ -307,8 +307,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }, } - WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false) - verifyResult(cs, 1, 1, ns) + WaitForSchedulerAfterAction(ctx, f, createPausePodAction(f, conf), ns, podName, false) + verifyResult(ctx, cs, 1, 1, ns) }) }) @@ -345,7 +345,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } // Apply node label to each node e2enode.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name) - e2enode.ExpectNodeHasLabel(cs, node.Name, "node", node.Name) + e2enode.ExpectNodeHasLabel(ctx, cs, node.Name, "node", node.Name) // Find allocatable amount of CPU. allocatable, found := node.Status.Allocatable[v1.ResourceCPU] if !found { @@ -363,7 +363,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } }() - pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) + pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, pod := range pods.Items { _, found := nodeToAllocatableMap[pod.Spec.NodeName] @@ -379,7 +379,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { for nodeName, cpu := range nodeToAllocatableMap { requestedCPU := cpu * 7 / 10 framework.Logf("Creating a pod which consumes cpu=%vm on Node %v", requestedCPU, nodeName) - fillerPods = append(fillerPods, createPausePod(f, pausePodConfig{ + fillerPods = append(fillerPods, createPausePod(ctx, f, pausePodConfig{ Name: "filler-pod-" + string(uuid.NewUUID()), Resources: &v1.ResourceRequirements{ Limits: v1.ResourceList{ @@ -410,7 +410,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } // Wait for filler pods to schedule. for _, pod := range fillerPods { - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod)) } ginkgo.By("Creating another pod that requires unavailable amount of CPU.") // Create another pod that requires 50% of the largest node CPU resources. @@ -429,8 +429,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }, }, } - WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false) - verifyResult(cs, len(fillerPods), 1, ns) + WaitForSchedulerAfterAction(ctx, f, createPausePodAction(f, conf), ns, podName, false) + verifyResult(ctx, cs, len(fillerPods), 1, ns) }) // Test Nodes does not have any label, hence it should be impossible to schedule Pod with @@ -454,8 +454,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }, } - WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false) - verifyResult(cs, 0, 1, ns) + WaitForSchedulerAfterAction(ctx, f, createPausePodAction(f, conf), ns, podName, false) + verifyResult(ctx, cs, 0, 1, ns) }) /* @@ -464,18 +464,18 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Description: Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node. 
*/ framework.ConformanceIt("validates that NodeSelector is respected if matching ", func(ctx context.Context) { - nodeName := GetNodeThatCanRunPod(f) + nodeName := GetNodeThatCanRunPod(ctx, f) ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) v := "42" e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(ctx, cs, nodeName, k, v) defer e2enode.RemoveLabelOffNode(cs, nodeName, k) ginkgo.By("Trying to relaunch the pod, now with labels.") labelPodName := "with-labels" - createPausePod(f, pausePodConfig{ + createPausePod(ctx, f, pausePodConfig{ Name: labelPodName, NodeSelector: map[string]string{ k: v, @@ -487,8 +487,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName)) - labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{}) + framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, labelPodName)) + labelPod, err := cs.CoreV1().Pods(ns).Get(ctx, labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(labelPod.Spec.NodeName, nodeName) }) @@ -530,25 +530,25 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }, Labels: map[string]string{"name": "restricted"}, } - WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false) - verifyResult(cs, 0, 1, ns) + WaitForSchedulerAfterAction(ctx, f, createPausePodAction(f, conf), ns, podName, false) + verifyResult(ctx, cs, 0, 1, ns) }) // Keep the same steps with the test on NodeSelector, // but specify Affinity in Pod.Spec.Affinity, instead of NodeSelector. ginkgo.It("validates that required NodeAffinity setting is respected if matching", func(ctx context.Context) { - nodeName := GetNodeThatCanRunPod(f) + nodeName := GetNodeThatCanRunPod(ctx, f) ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) v := "42" e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(ctx, cs, nodeName, k, v) defer e2enode.RemoveLabelOffNode(cs, nodeName, k) ginkgo.By("Trying to relaunch the pod, now with labels.") labelPodName := "with-labels" - createPausePod(f, pausePodConfig{ + createPausePod(ctx, f, pausePodConfig{ Name: labelPodName, Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ @@ -574,8 +574,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName)) - labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{}) + framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, labelPodName)) + labelPod, err := cs.CoreV1().Pods(ns).Get(ctx, labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(labelPod.Spec.NodeName, nodeName) }) @@ -585,7 +585,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // 3. 
Try to relaunch the pod with tolerations tolerate the taints on node, // and the pod's nodeName specified to the name of node found in step 1 ginkgo.It("validates that taints-tolerations is respected if matching", func(ctx context.Context) { - nodeName := getNodeThatCanRunPodWithoutToleration(f) + nodeName := getNodeThatCanRunPodWithoutToleration(ctx, f) ginkgo.By("Trying to apply a random taint on the found node.") testTaint := v1.Taint{ @@ -593,20 +593,20 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Value: "testing-taint-value", Effect: v1.TaintEffectNoSchedule, } - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint) ginkgo.By("Trying to apply a random label on the found node.") labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelValue := "testing-label-value" e2enode.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue) - e2enode.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue) + e2enode.ExpectNodeHasLabel(ctx, cs, nodeName, labelKey, labelValue) defer e2enode.RemoveLabelOffNode(cs, nodeName, labelKey) ginkgo.By("Trying to relaunch the pod, now with tolerations.") tolerationPodName := "with-tolerations" - createPausePod(f, pausePodConfig{ + createPausePod(ctx, f, pausePodConfig{ Name: tolerationPodName, Tolerations: []v1.Toleration{{Key: testTaint.Key, Value: testTaint.Value, Effect: testTaint.Effect}}, NodeSelector: map[string]string{labelKey: labelValue}, @@ -617,8 +617,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new taint yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, tolerationPodName)) - deployedPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), tolerationPodName, metav1.GetOptions{}) + framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, tolerationPodName)) + deployedPod, err := cs.CoreV1().Pods(ns).Get(ctx, tolerationPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(deployedPod.Spec.NodeName, nodeName) }) @@ -628,7 +628,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // 3. 
Try to relaunch the pod still no tolerations, // and the pod's nodeName specified to the name of node found in step 1 ginkgo.It("validates that taints-tolerations is respected if not matching", func(ctx context.Context) { - nodeName := getNodeThatCanRunPodWithoutToleration(f) + nodeName := getNodeThatCanRunPodWithoutToleration(ctx, f) ginkgo.By("Trying to apply a random taint on the found node.") testTaint := v1.Taint{ @@ -636,15 +636,15 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Value: "testing-taint-value", Effect: v1.TaintEffectNoSchedule, } - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint) ginkgo.By("Trying to apply a random label on the found node.") labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelValue := "testing-label-value" e2enode.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue) - e2enode.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue) + e2enode.ExpectNodeHasLabel(ctx, cs, nodeName, labelKey, labelValue) defer e2enode.RemoveLabelOffNode(cs, nodeName, labelKey) ginkgo.By("Trying to relaunch the pod, still no tolerations.") @@ -654,22 +654,22 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { NodeSelector: map[string]string{labelKey: labelValue}, } - WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podNameNoTolerations, false) - verifyResult(cs, 0, 1, ns) + WaitForSchedulerAfterAction(ctx, f, createPausePodAction(f, conf), ns, podNameNoTolerations, false) + verifyResult(ctx, cs, 0, 1, ns) ginkgo.By("Removing taint off the node") - WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true) - verifyResult(cs, 1, 0, ns) + WaitForSchedulerAfterAction(ctx, f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true) + verifyResult(ctx, cs, 1, 0, ns) }) ginkgo.It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func(ctx context.Context) { - nodeName := GetNodeThatCanRunPod(f) + nodeName := GetNodeThatCanRunPod(ctx, f) localhost := "127.0.0.1" if framework.TestContext.ClusterIsIPv6() { localhost = "::1" } - hostIP := getNodeHostIP(f, nodeName) + hostIP := getNodeHostIP(ctx, f, nodeName) // use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not ginkgo.By("Trying to apply a random label on the found node.") @@ -680,18 +680,18 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { nodeSelector[k] = v e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(ctx, cs, nodeName, k, v) defer e2enode.RemoveLabelOffNode(cs, nodeName, k) port := int32(54321) ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP %s and expect scheduled", port, localhost)) - createHostPortPodOnNode(f, "pod1", ns, localhost, port, v1.ProtocolTCP, nodeSelector, true) + createHostPortPodOnNode(ctx, f, "pod1", ns, localhost, port, v1.ProtocolTCP, nodeSelector, true) ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP %s on the node which pod1 resides and expect scheduled", port, hostIP)) - createHostPortPodOnNode(f, 
"pod2", ns, hostIP, port, v1.ProtocolTCP, nodeSelector, true) + createHostPortPodOnNode(ctx, f, "pod2", ns, hostIP, port, v1.ProtocolTCP, nodeSelector, true) ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP %s but use UDP protocol on the node which pod2 resides", port, hostIP)) - createHostPortPodOnNode(f, "pod3", ns, hostIP, port, v1.ProtocolUDP, nodeSelector, true) + createHostPortPodOnNode(ctx, f, "pod3", ns, hostIP, port, v1.ProtocolUDP, nodeSelector, true) }) @@ -702,8 +702,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { same node if one of those IPs is the default HostIP of 0.0.0.0, which represents all IPs on the host. */ framework.ConformanceIt("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func(ctx context.Context) { - nodeName := GetNodeThatCanRunPod(f) - hostIP := getNodeHostIP(f, nodeName) + nodeName := GetNodeThatCanRunPod(ctx, f) + hostIP := getNodeHostIP(ctx, f, nodeName) // use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) @@ -713,27 +713,27 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { nodeSelector[k] = v e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(ctx, cs, nodeName, k, v) defer e2enode.RemoveLabelOffNode(cs, nodeName, k) port := int32(54322) ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port)) - createHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true) + createHostPortPodOnNode(ctx, f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true) ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP %s on the node which pod4 resides and expect not scheduled", port, hostIP)) - createHostPortPodOnNode(f, "pod5", ns, hostIP, port, v1.ProtocolTCP, nodeSelector, false) + createHostPortPodOnNode(ctx, f, "pod5", ns, hostIP, port, v1.ProtocolTCP, nodeSelector, false) }) ginkgo.Context("PodTopologySpread Filtering", func() { var nodeNames []string topologyKey := "kubernetes.io/e2e-pts-filter" - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { if len(nodeList.Items) < 2 { ginkgo.Skip("At least 2 nodes are required to run the test") } ginkgo.By("Trying to get 2 available nodes which can run pod") - nodeNames = Get2NodesThatCanRunPod(f) + nodeNames = Get2NodesThatCanRunPod(ctx, f) ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey)) for _, nodeName := range nodeNames { e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) @@ -788,8 +788,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }, }, } - runPauseRS(f, rsConfig) - podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + runPauseRS(ctx, f, rsConfig) + podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) numInNode1, numInNode2 := 0, 0 for _, pod := range podList.Items { @@ -821,10 +821,10 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }, }, } - createPauseRS(f, rsConfig) + createPauseRS(ctx, f, rsConfig) ginkgo.By("Expect all pods stay in pending state") - podList, err := 
e2epod.WaitForNumberOfPods(cs, ns, replicas, time.Minute) + podList, err := e2epod.WaitForNumberOfPods(ctx, cs, ns, replicas, time.Minute) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodsSchedulingGated(cs, ns, replicas, time.Minute)) @@ -874,8 +874,8 @@ func patchPod(cs clientset.Interface, old, new *v1.Pod) (*v1.Pod, error) { } // printAllPodsOnNode outputs status of all kubelet pods into log. -func printAllPodsOnNode(c clientset.Interface, nodeName string) { - podList, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.nodeName=" + nodeName}) +func printAllPodsOnNode(ctx context.Context, c clientset.Interface, nodeName string) { + podList, err := c.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{FieldSelector: "spec.nodeName=" + nodeName}) if err != nil { framework.Logf("Unable to retrieve pods for node %v: %v", nodeName, err) return @@ -940,37 +940,37 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { return pod } -func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { +func createPausePod(ctx context.Context, f *framework.Framework, conf pausePodConfig) *v1.Pod { namespace := conf.Namespace if len(namespace) == 0 { namespace = f.Namespace.Name } - pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(ctx, initPausePod(f, conf), metav1.CreateOptions{}) framework.ExpectNoError(err) return pod } -func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { - return runPausePodWithTimeout(f, conf, framework.PollShortTimeout) +func runPausePod(ctx context.Context, f *framework.Framework, conf pausePodConfig) *v1.Pod { + return runPausePodWithTimeout(ctx, f, conf, framework.PollShortTimeout) } -func runPausePodWithTimeout(f *framework.Framework, conf pausePodConfig, timeout time.Duration) *v1.Pod { - pod := createPausePod(f, conf) - framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, timeout)) - pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), conf.Name, metav1.GetOptions{}) +func runPausePodWithTimeout(ctx context.Context, f *framework.Framework, conf pausePodConfig, timeout time.Duration) *v1.Pod { + pod := createPausePod(ctx, f, conf) + framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, timeout)) + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, conf.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return pod } -func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string { +func runPodAndGetNodeName(ctx context.Context, f *framework.Framework, conf pausePodConfig) string { // launch a pod to find a node which can launch a pod. We intentionally do // not just take the node list and choose the first of them. Depending on the // cluster and the scheduler it might be that a "normal" pod cannot be // scheduled onto it. 
- pod := runPausePod(f, conf) + pod := runPausePod(ctx, f, conf) ginkgo.By("Explicitly delete pod here to free the resource it takes.") - err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) return pod.Spec.NodeName @@ -995,28 +995,28 @@ func getRequestedStorageEphemeralStorage(pod v1.Pod) int64 { // removeTaintFromNodeAction returns a closure that removes the given taint // from the given node upon invocation. func removeTaintFromNodeAction(cs clientset.Interface, nodeName string, testTaint v1.Taint) Action { - return func() error { - e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) + return func(ctx context.Context) error { + e2enode.RemoveTaintOffNode(ctx, cs, nodeName, testTaint) return nil } } // createPausePodAction returns a closure that creates a pause pod upon invocation. func createPausePodAction(f *framework.Framework, conf pausePodConfig) Action { - return func() error { - _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{}) + return func(ctx context.Context) error { + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, initPausePod(f, conf), metav1.CreateOptions{}) return err } } // WaitForSchedulerAfterAction performs the provided action and then waits for // scheduler to act on the given pod. -func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podName string, expectSuccess bool) { +func WaitForSchedulerAfterAction(ctx context.Context, f *framework.Framework, action Action, ns, podName string, expectSuccess bool) { predicate := scheduleFailureEvent(podName) if expectSuccess { predicate = scheduleSuccessEvent(ns, podName, "" /* any node */) } - observed, err := observeEventAfterAction(f.ClientSet, f.Namespace.Name, predicate, action) + observed, err := observeEventAfterAction(ctx, f.ClientSet, f.Namespace.Name, predicate, action) framework.ExpectNoError(err) if expectSuccess && !observed { framework.Failf("Did not observe success event after performing the supplied action for pod %v", podName) @@ -1027,8 +1027,8 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podN } // TODO: upgrade calls in PodAffinity tests when we're able to run them -func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) { - allPods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) +func verifyResult(ctx context.Context, c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) { + allPods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) scheduledPods, notScheduledPods := GetPodsScheduled(workerNodes, allPods) @@ -1037,14 +1037,14 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched } // GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it -func GetNodeThatCanRunPod(f *framework.Framework) string { +func GetNodeThatCanRunPod(ctx context.Context, f *framework.Framework) string { ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.") - return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"}) + return runPodAndGetNodeName(ctx, f, pausePodConfig{Name: "without-label"}) } // Get2NodesThatCanRunPod return a 2-node slice where can run 
pod. -func Get2NodesThatCanRunPod(f *framework.Framework) []string { - firstNode := GetNodeThatCanRunPod(f) +func Get2NodesThatCanRunPod(ctx context.Context, f *framework.Framework) []string { + firstNode := GetNodeThatCanRunPod(ctx, f) ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.") pod := pausePodConfig{ Name: "without-label", @@ -1062,17 +1062,17 @@ func Get2NodesThatCanRunPod(f *framework.Framework) []string { }, }, } - secondNode := runPodAndGetNodeName(f, pod) + secondNode := runPodAndGetNodeName(ctx, f, pod) return []string{firstNode, secondNode} } -func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string { +func getNodeThatCanRunPodWithoutToleration(ctx context.Context, f *framework.Framework) string { ginkgo.By("Trying to launch a pod without a toleration to get a node which can launch it.") - return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"}) + return runPodAndGetNodeName(ctx, f, pausePodConfig{Name: "without-toleration"}) } // CreateHostPortPods creates RC with host port 4321 -func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) { +func CreateHostPortPods(ctx context.Context, f *framework.Framework, id string, replicas int, expectRunning bool) { ginkgo.By("Running RC which reserves host port") config := &testutils.RCConfig{ Client: f.ClientSet, @@ -1083,14 +1083,14 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR Replicas: replicas, HostPorts: map[string]int{"port1": 4321}, } - err := e2erc.RunRC(*config) + err := e2erc.RunRC(ctx, *config) if expectRunning { framework.ExpectNoError(err) } } // CreateNodeSelectorPods creates RC with host port 4321 and defines node selector -func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error { +func CreateNodeSelectorPods(ctx context.Context, f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error { ginkgo.By("Running RC which reserves host port and defines node selector") config := &testutils.RCConfig{ @@ -1103,7 +1103,7 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod HostPorts: map[string]int{"port1": 4321}, NodeSelector: nodeSelector, } - err := e2erc.RunRC(*config) + err := e2erc.RunRC(ctx, *config) if expectRunning { return err } @@ -1112,7 +1112,7 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod // create pod which using hostport on the specified node according to the nodeSelector // it starts an http server on the exposed port -func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) { +func createHostPortPodOnNode(ctx context.Context, f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) { hostPortPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -1147,10 +1147,10 @@ func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, NodeSelector: nodeSelector, }, } - _, err := f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), hostPortPod, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Pods(ns).Create(ctx, hostPortPod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitForPodNotPending(f.ClientSet, ns, podName) + err = 
e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, podName) if expectScheduled { framework.ExpectNoError(err) } @@ -1176,13 +1176,13 @@ func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, } // getNodeHostIP returns the first internal IP on the node matching the main Cluster IP family -func getNodeHostIP(f *framework.Framework, nodeName string) string { +func getNodeHostIP(ctx context.Context, f *framework.Framework, nodeName string) string { // Get the internal HostIP of the node family := v1.IPv4Protocol if framework.TestContext.ClusterIsIPv6() { family = v1.IPv6Protocol } - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) ips := e2enode.GetAddressesByTypeAndFamily(node, v1.NodeInternalIP, family) framework.ExpectNotEqual(len(ips), 0) diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 5bff0dafa8f..bfb199fcc7c 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -81,33 +81,33 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { {name: highPriorityClassName, value: highPriority}, } - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { for _, pair := range priorityPairs { - cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, *metav1.NewDeleteOptions(0)) + _ = cs.SchedulingV1().PriorityClasses().Delete(ctx, pair.name, *metav1.NewDeleteOptions(0)) } for _, node := range nodeList.Items { nodeCopy := node.DeepCopy() delete(nodeCopy.Status.Capacity, testExtendedResource) delete(nodeCopy.Status.Allocatable, testExtendedResource) - err := patchNode(cs, &node, nodeCopy) + err := patchNode(ctx, cs, &node, nodeCopy) framework.ExpectNoError(err) } }) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = f.ClientSet ns = f.Namespace.Name nodeList = &v1.NodeList{} var err error for _, pair := range priorityPairs { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("expected 'alreadyExists' as error, got instead: %v", err) } } - e2enode.WaitForTotalHealthy(cs, time.Minute) - nodeList, err = e2enode.GetReadySchedulableNodes(cs) + e2enode.WaitForTotalHealthy(ctx, cs, time.Minute) + nodeList, err = e2enode.GetReadySchedulableNodes(ctx, cs) if err != nil { framework.Logf("Unexpected error occurred: %v", err) } @@ -116,7 +116,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { workerNodes.Insert(n.Name) } - err = framework.CheckTestingNSDeletedExcept(cs, ns) + err = framework.CheckTestingNSDeletedExcept(ctx, cs, ns) framework.ExpectNoError(err) }) @@ -140,7 +140,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeCopy := node.DeepCopy() nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("5") nodeCopy.Status.Allocatable[testExtendedResource] = resource.MustParse("5") - err := patchNode(cs, &node, nodeCopy) + err := patchNode(ctx, cs, &node, nodeCopy) framework.ExpectNoError(err) for j := 0; j < 2; j++ { @@ -153,7 +153,7 @@ var _ = 
SIGDescribe("SchedulerPreemption [Serial]", func() { if len(pods) == 0 { priorityName = lowPriorityClassName } - pausePod := createPausePod(f, pausePodConfig{ + pausePod := createPausePod(ctx, f, pausePodConfig{ Name: fmt.Sprintf("pod%d-%d-%v", i, j, priorityName), PriorityClassName: priorityName, Resources: &v1.ResourceRequirements{ @@ -184,7 +184,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } ginkgo.By("Wait for pods to be scheduled.") for _, pod := range pods { - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod)) } // Set the pod request to the first pod's resources (should be low priority pod) @@ -192,7 +192,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { ginkgo.By("Run a high priority pod that has same requirements as that of lower priority pod") // Create a high priority pod and make sure it is scheduled on the same node as the low priority pod. - runPausePodWithTimeout(f, pausePodConfig{ + runPausePodWithTimeout(ctx, f, pausePodConfig{ Name: "preemptor-pod", PriorityClassName: highPriorityClassName, Resources: &v1.ResourceRequirements{ @@ -201,14 +201,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }, }, framework.PodStartShortTimeout) - preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(context.TODO(), pods[0].Name, metav1.GetOptions{}) + preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(ctx, pods[0].Name, metav1.GetOptions{}) podPreempted := (err != nil && apierrors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) if !podPreempted { framework.Failf("expected pod to be preempted, instead got pod %+v and error %v", preemptedPod, err) } for i := 1; i < len(pods); i++ { - livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(context.TODO(), pods[i].Name, metav1.GetOptions{}) + livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(ctx, pods[i].Name, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil()) } @@ -231,7 +231,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeCopy := node.DeepCopy() nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("5") nodeCopy.Status.Allocatable[testExtendedResource] = resource.MustParse("5") - err := patchNode(cs, &node, nodeCopy) + err := patchNode(ctx, cs, &node, nodeCopy) framework.ExpectNoError(err) for j := 0; j < 2; j++ { @@ -244,7 +244,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { if len(pods) == 0 { priorityName = lowPriorityClassName } - pausePod := createPausePod(f, pausePodConfig{ + pausePod := createPausePod(ctx, f, pausePodConfig{ Name: fmt.Sprintf("pod%d-%d-%v", i, j, priorityName), PriorityClassName: priorityName, Resources: &v1.ResourceRequirements{ @@ -275,7 +275,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } ginkgo.By("Wait for pods to be scheduled.") for _, pod := range pods { - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, pod)) } // We want this pod to be preempted @@ -285,12 +285,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { defer func() { // Clean-up the critical pod // Always run cleanup to make sure the pod is properly cleaned up. 
- err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", *metav1.NewDeleteOptions(0)) + err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(ctx, "critical-pod", *metav1.NewDeleteOptions(0)) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err) } }() - runPausePodWithTimeout(f, pausePodConfig{ + runPausePodWithTimeout(ctx, f, pausePodConfig{ Name: "critical-pod", Namespace: metav1.NamespaceSystem, PriorityClassName: scheduling.SystemClusterCritical, @@ -302,15 +302,15 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { defer func() { // Clean-up the critical pod - err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", *metav1.NewDeleteOptions(0)) + err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(ctx, "critical-pod", *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) }() // Make sure that the lowest priority pod is deleted. - preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(context.TODO(), pods[0].Name, metav1.GetOptions{}) + preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(ctx, pods[0].Name, metav1.GetOptions{}) podPreempted := (err != nil && apierrors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) for i := 1; i < len(pods); i++ { - livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(context.TODO(), pods[i].Name, metav1.GetOptions{}) + livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(ctx, pods[i].Name, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil()) } @@ -333,7 +333,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeCopy := node.DeepCopy() nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("1") nodeCopy.Status.Allocatable[testExtendedResource] = resource.MustParse("1") - err := patchNode(cs, &node, nodeCopy) + err := patchNode(ctx, cs, &node, nodeCopy) framework.ExpectNoError(err) // prepare node affinity to make sure both the lower and higher priority pods are scheduled on the same node @@ -352,7 +352,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } ginkgo.By("Create a low priority pod that consumes 1/1 of node resources") - victimPod := createPausePod(f, pausePodConfig{ + victimPod := createPausePod(ctx, f, pausePodConfig{ Name: "victim-pod", PriorityClassName: lowPriorityClassName, Resources: &v1.ResourceRequirements{ @@ -365,13 +365,13 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { framework.Logf("Created pod: %v", victimPod.Name) ginkgo.By("Wait for the victim pod to be scheduled") - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, victimPod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, cs, victimPod)) // Remove the finalizer so that the victim pod can be GCed - defer e2epod.NewPodClient(f).RemoveFinalizer(victimPod.Name, testFinalizer) + defer e2epod.NewPodClient(f).RemoveFinalizer(ctx, victimPod.Name, testFinalizer) ginkgo.By("Create a high priority pod to trigger preemption of the lower priority pod") - preemptorPod := createPausePod(f, pausePodConfig{ + preemptorPod := createPausePod(ctx, f, pausePodConfig{ Name: "preemptor-pod", PriorityClassName: highPriorityClassName, Resources: &v1.ResourceRequirements{ @@ -383,11 +383,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { framework.Logf("Created pod: %v", preemptorPod.Name) 
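The preemption specs above delete their critical and preemptor pods in defer blocks that close over the spec's ctx. Where the patch already uses ginkgo.DeferCleanup (for example for RemoveTaintOffNode in predicates.go), the cleanup can also be written as a context-aware callback; a sketch under that assumption, with the client, namespace and pod name purely illustrative:

// Sketch only: registering context-aware cleanup with ginkgo.DeferCleanup instead of a raw
// defer that closes over ctx.
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func deleteCriticalPodOnCleanup(client kubernetes.Interface, namespace, podName string) {
	ginkgo.DeferCleanup(func(ctx context.Context) {
		// Assumed here: DeferCleanup passes a teardown context to callbacks that accept one,
		// so the delete is bounded by that context rather than context.TODO().
		_ = client.CoreV1().Pods(namespace).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
	})
}

Either form works; the key change in this patch is that the delete call itself now takes ctx instead of context.TODO().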
ginkgo.By("Waiting for the victim pod to be terminating") - err = e2epod.WaitForPodTerminatingInNamespaceTimeout(f.ClientSet, victimPod.Name, victimPod.Namespace, framework.PodDeleteTimeout) + err = e2epod.WaitForPodTerminatingInNamespaceTimeout(ctx, f.ClientSet, victimPod.Name, victimPod.Namespace, framework.PodDeleteTimeout) framework.ExpectNoError(err) ginkgo.By("Verifying the pod has the pod disruption condition") - e2epod.VerifyPodHasConditionWithType(f, victimPod, v1.DisruptionTarget) + e2epod.VerifyPodHasConditionWithType(ctx, f, victimPod, v1.DisruptionTarget) }) ginkgo.Context("PodTopologySpread Preemption", func() { @@ -396,29 +396,29 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { topologyKey := "kubernetes.io/e2e-pts-preemption" var fakeRes v1.ResourceName = "example.com/fakePTSRes" - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { if len(nodeList.Items) < 2 { ginkgo.Skip("At least 2 nodes are required to run the test") } ginkgo.By("Trying to get 2 available nodes which can run pod") - nodeNames = Get2NodesThatCanRunPod(f) + nodeNames = Get2NodesThatCanRunPod(ctx, f) ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey)) for _, nodeName := range nodeNames { e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) - node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) // update Node API object with a fake resource ginkgo.By(fmt.Sprintf("Apply 10 fake resource to node %v.", node.Name)) nodeCopy := node.DeepCopy() nodeCopy.Status.Capacity[fakeRes] = resource.MustParse("10") nodeCopy.Status.Allocatable[fakeRes] = resource.MustParse("10") - err = patchNode(cs, node, nodeCopy) + err = patchNode(ctx, cs, node, nodeCopy) framework.ExpectNoError(err) nodes = append(nodes, node) } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { for _, nodeName := range nodeNames { e2enode.RemoveLabelOffNode(cs, nodeName, topologyKey) } @@ -426,7 +426,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeCopy := node.DeepCopy() delete(nodeCopy.Status.Capacity, fakeRes) delete(nodeCopy.Status.Allocatable, fakeRes) - err := patchNode(cs, node, nodeCopy) + err := patchNode(ctx, cs, node, nodeCopy) framework.ExpectNoError(err) } }) @@ -474,10 +474,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { ginkgo.By("Create 1 High Pod and 3 Low Pods to occupy 9/10 of fake resources on both nodes.") // Prepare 1 High Pod and 3 Low Pods - runPausePod(f, highPodCfg) + runPausePod(ctx, f, highPodCfg) for i := 1; i <= 3; i++ { lowPodCfg.Name = fmt.Sprintf("low-%v", i) - runPausePod(f, lowPodCfg) + runPausePod(ctx, f, lowPodCfg) } ginkgo.By("Create 1 Medium Pod with TopologySpreadConstraints") @@ -511,14 +511,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // However, in that case, the Pods spread becomes [, ], which doesn't // satisfy the pod topology spread constraints. Hence it needs to preempt another low pod // to make the Pods spread like [, ]. - runPausePod(f, mediumPodCfg) + runPausePod(ctx, f, mediumPodCfg) ginkgo.By("Verify there are 3 Pods left in this namespace") wantPods := sets.NewString("high", "medium", "low") // Wait until the number of pods stabilizes. Note that `medium` pod can get scheduled once the // second low priority pod is marked as terminating. 
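The medium pod's actual TopologySpreadConstraints are outside this hunk. For orientation, a hard constraint over the per-test topology key has roughly this shape; an illustrative sketch, not the exact constraint the test builds:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// spreadConstraint builds a hard pod topology spread constraint: pods matching
// the label selector must stay within maxSkew=1 of each other across the
// values of topologyKey, otherwise the scheduler refuses to place them (and
// may preempt lower-priority pods to satisfy the constraint).
func spreadConstraint(topologyKey string, podLabels map[string]string) v1.TopologySpreadConstraint {
	return v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       topologyKey, // e.g. the dedicated key applied to the 2 nodes above
		WhenUnsatisfiable: v1.DoNotSchedule,
		LabelSelector:     &metav1.LabelSelector{MatchLabels: podLabels},
	}
}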
- pods, err := e2epod.WaitForNumberOfPods(cs, ns, 3, framework.PollShortTimeout) + pods, err := e2epod.WaitForNumberOfPods(ctx, cs, ns, 3, framework.PollShortTimeout) framework.ExpectNoError(err) for _, pod := range pods.Items { @@ -546,11 +546,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { priorityPairs := make([]priorityPair, 0) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { // print out additional info if tests failed if ginkgo.CurrentSpecReport().Failed() { // List existing PriorityClasses. - priorityList, err := cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{}) + priorityList, err := cs.SchedulingV1().PriorityClasses().List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Unable to list PriorityClasses: %v", err) } else { @@ -565,26 +565,26 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeCopy := node.DeepCopy() delete(nodeCopy.Status.Capacity, fakecpu) delete(nodeCopy.Status.Allocatable, fakecpu) - err := patchNode(cs, node, nodeCopy) + err := patchNode(ctx, cs, node, nodeCopy) framework.ExpectNoError(err) } for _, pair := range priorityPairs { - cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, *metav1.NewDeleteOptions(0)) + _ = cs.SchedulingV1().PriorityClasses().Delete(ctx, pair.name, *metav1.NewDeleteOptions(0)) } }) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = f.ClientSet ns = f.Namespace.Name // find an available node ginkgo.By("Finding an available node") - nodeName := GetNodeThatCanRunPod(f) + nodeName := GetNodeThatCanRunPod(ctx, f) framework.Logf("found a healthy node: %s", nodeName) // get the node API object var err error - node, err = cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err = cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { framework.Failf("error getting node %q: %v", nodeName, err) } @@ -598,7 +598,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeCopy := node.DeepCopy() nodeCopy.Status.Capacity[fakecpu] = resource.MustParse("1000") nodeCopy.Status.Allocatable[fakecpu] = resource.MustParse("1000") - err = patchNode(cs, node, nodeCopy) + err = patchNode(ctx, cs, node, nodeCopy) framework.ExpectNoError(err) // create four PriorityClass: p1, p2, p3, p4 @@ -606,7 +606,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { priorityName := fmt.Sprintf("p%d", i) priorityVal := int32(i) priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal}) - _, err := cs.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}, metav1.CreateOptions{}) + _, err := cs.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}, metav1.CreateOptions{}) if err != nil { framework.Logf("Failed to create priority '%v/%v'. Reason: %v. 
Msg: %v", priorityName, priorityVal, apierrors.ReasonForError(err), err) } @@ -629,11 +629,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { _, podController := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options) }, }, &v1.Pod{}, @@ -702,7 +702,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } // create ReplicaSet{1,2,3} so as to occupy 950/1000 fake resource for i := range rsConfs { - runPauseRS(f, rsConfs[i]) + runPauseRS(ctx, f, rsConfs[i]) } framework.Logf("pods created so far: %v", podNamesSeen) @@ -720,8 +720,8 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { Limits: v1.ResourceList{fakecpu: resource.MustParse("500")}, }, } - preemptorPod := createPod(f, preemptorPodConf) - waitForPreemptingWithTimeout(f, preemptorPod, framework.PodGetTimeout) + preemptorPod := createPod(ctx, f, preemptorPodConf) + waitForPreemptingWithTimeout(ctx, f, preemptorPod, framework.PodGetTimeout) framework.Logf("pods created so far: %v", podNamesSeen) @@ -768,12 +768,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { testUUID := uuid.New().String() var pcs []*schedulingv1.PriorityClass - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = f.ClientSet // Create 2 PriorityClass: p1, p2. for i := 1; i <= 2; i++ { name, val := fmt.Sprintf("p%d", i), int32(i) - pc, err := cs.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"e2e": testUUID}}, Value: val}, metav1.CreateOptions{}) + pc, err := cs.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"e2e": testUUID}}, Value: val}, metav1.CreateOptions{}) if err != nil { framework.Logf("Failed to create priority '%v/%v'. Reason: %v. Msg: %v", name, val, apierrors.ReasonForError(err), err) } @@ -784,11 +784,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { // Print out additional info if tests failed. if ginkgo.CurrentSpecReport().Failed() { // List existing PriorityClasses. - priorityList, err := cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{}) + priorityList, err := cs.SchedulingV1().PriorityClasses().List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Unable to list PriorityClasses: %v", err) } else { @@ -800,7 +800,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } // Collection deletion on created PriorityClasses. - err := cs.SchedulingV1().PriorityClasses().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: fmt.Sprintf("e2e=%v", testUUID)}) + err := cs.SchedulingV1().PriorityClasses().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: fmt.Sprintf("e2e=%v", testUUID)}) framework.ExpectNoError(err) }) @@ -815,13 +815,13 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // 1. Patch/Update on immutable fields will fail. 
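With the List and Watch closures now capturing the spec context, the informer's run loop can share the same lifetime; a sketch, assuming the controller is started with ctx.Done() as its stop channel (watchPods is an illustrative wrapper, not a helper from this patch):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchPods wires a pod informer to the caller's ctx: List/Watch requests use
// ctx, and ctx.Done() doubles as the controller's stop channel, so everything
// shuts down as soon as the spec context is cancelled.
func watchPods(ctx context.Context, cs kubernetes.Interface, ns string, onAdd func(obj interface{})) {
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return cs.CoreV1().Pods(ns).List(ctx, options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return cs.CoreV1().Pods(ns).Watch(ctx, options)
			},
		},
		&v1.Pod{},
		0, // no resync
		cache.ResourceEventHandlerFuncs{AddFunc: onAdd},
	)
	go controller.Run(ctx.Done())
}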
pcCopy := pcs[0].DeepCopy() pcCopy.Value = pcCopy.Value * 10 - err := patchPriorityClass(cs, pcs[0], pcCopy) + err := patchPriorityClass(ctx, cs, pcs[0], pcCopy) framework.ExpectError(err, "expect a patch error on an immutable field") framework.Logf("%v", err) pcCopy = pcs[1].DeepCopy() pcCopy.Value = pcCopy.Value * 10 - _, err = cs.SchedulingV1().PriorityClasses().Update(context.TODO(), pcCopy, metav1.UpdateOptions{}) + _, err = cs.SchedulingV1().PriorityClasses().Update(ctx, pcCopy, metav1.UpdateOptions{}) framework.ExpectError(err, "expect an update error on an immutable field") framework.Logf("%v", err) @@ -829,21 +829,21 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { newDesc := "updated description" pcCopy = pcs[0].DeepCopy() pcCopy.Description = newDesc - err = patchPriorityClass(cs, pcs[0], pcCopy) + err = patchPriorityClass(ctx, cs, pcs[0], pcCopy) framework.ExpectNoError(err) pcCopy = pcs[1].DeepCopy() pcCopy.Description = newDesc - _, err = cs.SchedulingV1().PriorityClasses().Update(context.TODO(), pcCopy, metav1.UpdateOptions{}) + _, err = cs.SchedulingV1().PriorityClasses().Update(ctx, pcCopy, metav1.UpdateOptions{}) framework.ExpectNoError(err) // 3. List existing PriorityClasses. - _, err = cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{}) + _, err = cs.SchedulingV1().PriorityClasses().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) // 4. Verify fields of updated PriorityClasses. for _, pc := range pcs { - livePC, err := cs.SchedulingV1().PriorityClasses().Get(context.TODO(), pc.Name, metav1.GetOptions{}) + livePC, err := cs.SchedulingV1().PriorityClasses().Get(ctx, pc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(livePC.Value, pc.Value) framework.ExpectEqual(livePC.Description, newDesc) @@ -878,37 +878,37 @@ func initPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet return pauseRS } -func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { +func createPauseRS(ctx context.Context, f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { namespace := conf.PodConfig.Namespace if len(namespace) == 0 { namespace = f.Namespace.Name } - rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(context.TODO(), initPauseRS(f, conf), metav1.CreateOptions{}) + rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(ctx, initPauseRS(f, conf), metav1.CreateOptions{}) framework.ExpectNoError(err) return rs } -func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { - rs := createPauseRS(f, conf) - framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout)) +func runPauseRS(ctx context.Context, f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { + rs := createPauseRS(ctx, f, conf) + framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx, f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout)) return rs } -func createPod(f *framework.Framework, conf pausePodConfig) *v1.Pod { +func createPod(ctx context.Context, f *framework.Framework, conf pausePodConfig) *v1.Pod { namespace := conf.Namespace if len(namespace) == 0 { namespace = f.Namespace.Name } - pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(ctx, initPausePod(f, conf), metav1.CreateOptions{}) 
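The bodies of the patchNode and patchPriorityClass helpers below are only partially shown; helpers of this shape conventionally diff the marshalled old and modified objects into a two-way strategic merge patch. A sketch of that conventional pattern for the node case (patchNodeStatus is an illustrative name, and the elided code is assumed, not quoted):

package example

import (
	"context"
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/kubernetes"
)

// patchNodeStatus marshals the old and modified node, builds a strategic merge
// patch from the two JSON documents, and applies it to the node's status
// subresource with the caller's ctx.
func patchNodeStatus(ctx context.Context, cs kubernetes.Interface, old, modified *v1.Node) error {
	oldData, err := json.Marshal(old)
	if err != nil {
		return err
	}
	newData, err := json.Marshal(modified)
	if err != nil {
		return err
	}
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
	if err != nil {
		return fmt.Errorf("failed to create merge patch for node %q: %w", old.Name, err)
	}
	_, err = cs.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
	return err
}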
framework.ExpectNoError(err) return pod } // waitForPreemptingWithTimeout verifies if 'pod' is preempting within 'timeout', specifically it checks // if the 'spec.NodeName' field of preemptor 'pod' has been set. -func waitForPreemptingWithTimeout(f *framework.Framework, pod *v1.Pod, timeout time.Duration) { +func waitForPreemptingWithTimeout(ctx context.Context, f *framework.Framework, pod *v1.Pod, timeout time.Duration) { err := wait.Poll(2*time.Second, timeout, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -920,7 +920,7 @@ func waitForPreemptingWithTimeout(f *framework.Framework, pod *v1.Pod, timeout t framework.ExpectNoError(err, "pod %v/%v failed to preempt other pods", pod.Namespace, pod.Name) } -func patchNode(client clientset.Interface, old *v1.Node, new *v1.Node) error { +func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, new *v1.Node) error { oldData, err := json.Marshal(old) if err != nil { return err @@ -934,11 +934,11 @@ func patchNode(client clientset.Interface, old *v1.Node, new *v1.Node) error { if err != nil { return fmt.Errorf("failed to create merge patch for node %q: %v", old.Name, err) } - _, err = client.CoreV1().Nodes().Patch(context.TODO(), old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") + _, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") return err } -func patchPriorityClass(cs clientset.Interface, old, new *schedulingv1.PriorityClass) error { +func patchPriorityClass(ctx context.Context, cs clientset.Interface, old, new *schedulingv1.PriorityClass) error { oldData, err := json.Marshal(old) if err != nil { return err @@ -952,6 +952,6 @@ func patchPriorityClass(cs clientset.Interface, old, new *schedulingv1.PriorityC if err != nil { return fmt.Errorf("failed to create merge patch for PriorityClass %q: %v", old.Name, err) } - _, err = cs.SchedulingV1().PriorityClasses().Patch(context.TODO(), old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = cs.SchedulingV1().PriorityClasses().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return err } diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 5edec00fd3d..e0eafc70486 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -73,8 +73,8 @@ var podRequestedResource = &v1.ResourceRequirements{ // nodesAreTooUtilized ensures that each node can support 2*crioMinMemLimit // We check for double because it needs to support at least the cri-o minimum // plus whatever delta between node usages (which could be up to or at least crioMinMemLimit) -func nodesAreTooUtilized(cs clientset.Interface, nodeList *v1.NodeList) bool { - nodeNameToPodList := podListForEachNode(cs) +func nodesAreTooUtilized(ctx context.Context, cs clientset.Interface, nodeList *v1.NodeList) bool { + nodeNameToPodList := podListForEachNode(ctx, cs) for _, node := range nodeList.Items { _, memFraction, _, memAllocatable := computeCPUMemFraction(node, podRequestedResource, nodeNameToPodList[node.Name]) if float64(memAllocatable)-(memFraction*float64(memAllocatable)) < float64(2*crioMinMemLimit) { @@ -93,30 +93,27 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", 
func() { f := framework.NewDefaultFramework("sched-priority") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.AfterEach(func() { - }) - - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs = f.ClientSet ns = f.Namespace.Name nodeList = &v1.NodeList{} var err error - e2enode.WaitForTotalHealthy(cs, time.Minute) - nodeList, err = e2enode.GetReadySchedulableNodes(cs) + e2enode.WaitForTotalHealthy(ctx, cs, time.Minute) + nodeList, err = e2enode.GetReadySchedulableNodes(ctx, cs) if err != nil { framework.Logf("Unexpected error occurred: %v", err) } framework.ExpectNoErrorWithOffset(0, err) - err = framework.CheckTestingNSDeletedExcept(cs, ns) + err = framework.CheckTestingNSDeletedExcept(ctx, cs, ns) framework.ExpectNoError(err) - err = e2epod.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{}) + err = e2epod.WaitForPodsRunningReady(ctx, cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{}) framework.ExpectNoError(err) // skip if the most utilized node has less than the cri-o minMemLimit available // otherwise we will not be able to run the test pod once all nodes are balanced - if nodesAreTooUtilized(cs, nodeList) { + if nodesAreTooUtilized(ctx, cs, nodeList) { ginkgo.Skip("nodes are too utilized to schedule test pods") } }) @@ -126,7 +123,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { e2eskipper.SkipUnlessNodeCountIsAtLeast(2) ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.") - pod := runPausePod(f, pausePodConfig{ + pod := runPausePod(ctx, f, pausePodConfig{ Name: "pod-with-label-security-s1", Labels: map[string]string{"security": "S1"}, }) @@ -134,7 +131,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { k := v1.LabelHostname ginkgo.By("Verifying the node has a label " + k) - node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) if _, hasLabel := node.Labels[k]; !hasLabel { // If the label is not exists, label all nodes for testing. 
@@ -143,7 +140,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { k = "kubernetes.io/e2e-node-topologyKey" v := "topologyvalue1" e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(ctx, cs, nodeName, k, v) defer e2enode.RemoveLabelOffNode(cs, nodeName, k) ginkgo.By("Trying to apply a label on other nodes.") @@ -151,18 +148,18 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { for _, node := range nodeList.Items { if node.Name != nodeName { e2enode.AddOrUpdateLabelOnNode(cs, node.Name, k, v) - e2enode.ExpectNodeHasLabel(cs, node.Name, k, v) + e2enode.ExpectNodeHasLabel(ctx, cs, node.Name, k, v) defer e2enode.RemoveLabelOffNode(cs, node.Name, k) } } } // make the nodes have balanced cpu,mem usage - err = createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6) + err = createBalancedPodForNodes(ctx, f, cs, ns, nodeList.Items, podRequestedResource, 0.6) framework.ExpectNoError(err) ginkgo.By("Trying to launch the pod with podAntiAffinity.") labelPodName := "pod-with-pod-antiaffinity" - pod = createPausePod(f, pausePodConfig{ + pod = createPausePod(ctx, f, pausePodConfig{ Resources: podRequestedResource, Name: labelPodName, Affinity: &v1.Affinity{ @@ -197,8 +194,8 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { }, }) ginkgo.By("Wait the pod becomes running") - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) - labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{}) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) + labelPod, err := cs.CoreV1().Pods(ns).Get(ctx, labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Verify the pod was scheduled to the expected node.") framework.ExpectNotEqual(labelPod.Spec.NodeName, nodeName) @@ -206,7 +203,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func(ctx context.Context) { // make the nodes have balanced cpu,mem usage ratio - err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5) + err := createBalancedPodForNodes(ctx, f, cs, ns, nodeList.Items, podRequestedResource, 0.5) framework.ExpectNoError(err) // Apply 10 taints to first node nodeName := nodeList.Items[0].Name @@ -236,7 +233,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { // panic and RemoveTaintsOffNode does not return an error if the taint does not exist. 
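getRandomTaint and the per-node taint lists used below are defined outside this hunk. For orientation, a NoSchedule taint and a toleration that matches it pair up like this (illustrative key and value, not the randomized ones the test generates):

package example

import v1 "k8s.io/api/core/v1"

// matchingTaintAndToleration returns a NoSchedule taint together with a
// toleration that tolerates exactly that taint (same key, value and effect).
func matchingTaintAndToleration(key, value string) (v1.Taint, v1.Toleration) {
	taint := v1.Taint{
		Key:    key,
		Value:  value,
		Effect: v1.TaintEffectNoSchedule,
	}
	toleration := v1.Toleration{
		Key:      key,
		Operator: v1.TolerationOpEqual,
		Value:    value,
		Effect:   v1.TaintEffectNoSchedule,
	}
	return taint, toleration
}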
ginkgo.DeferCleanup(e2enode.RemoveTaintsOffNode, cs, nodeName, tolerableTaints) for _, taint := range tolerableTaints { - addTaintToNode(cs, nodeName, taint) + addTaintToNode(ctx, cs, nodeName, taint) } // Apply the intolerable taints to each of the following nodes ginkgo.By("Adding 10 intolerable taints to all other nodes") @@ -244,20 +241,20 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { node := nodeList.Items[i] ginkgo.DeferCleanup(e2enode.RemoveTaintsOffNode, cs, node.Name, intolerableTaints[node.Name]) for _, taint := range intolerableTaints[node.Name] { - addTaintToNode(cs, node.Name, taint) + addTaintToNode(ctx, cs, node.Name, taint) } } tolerationPodName := "with-tolerations" ginkgo.By("Create a pod that tolerates all the taints of the first node.") - pod := createPausePod(f, pausePodConfig{ + pod := createPausePod(ctx, f, pausePodConfig{ Name: tolerationPodName, Tolerations: tolerations, }) - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)) ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.") - tolePod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), tolerationPodName, metav1.GetOptions{}) + tolePod, err := cs.CoreV1().Pods(ns).Get(ctx, tolerationPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(tolePod.Spec.NodeName, nodeName) }) @@ -266,12 +263,12 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { var nodeNames []string topologyKey := "kubernetes.io/e2e-pts-score" - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { if len(nodeList.Items) < 2 { ginkgo.Skip("At least 2 nodes are required to run the test") } ginkgo.By("Trying to get 2 available nodes which can run pod") - nodeNames = Get2NodesThatCanRunPod(f) + nodeNames = Get2NodesThatCanRunPod(ctx, f) ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey)) for _, nodeName := range nodeNames { e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) @@ -286,13 +283,13 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { ginkgo.It("validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed", func(ctx context.Context) { var nodes []v1.Node for _, nodeName := range nodeNames { - node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) nodes = append(nodes, *node) } // Make the nodes have balanced cpu,mem usage. - err := createBalancedPodForNodes(f, cs, ns, nodes, podRequestedResource, 0.5) + err := createBalancedPodForNodes(ctx, f, cs, ns, nodes, podRequestedResource, 0.5) framework.ExpectNoError(err) replicas := 4 @@ -307,7 +304,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { NodeSelector: map[string]string{topologyKey: nodeNames[0]}, }, } - runPauseRS(f, rsConfig) + runPauseRS(ctx, f, rsConfig) // Run a Pod with WhenUnsatisfiable:ScheduleAnyway. 
podCfg := pausePodConfig{ @@ -349,7 +346,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { }, }, } - testPod := runPausePod(f, podCfg) + testPod := runPausePod(ctx, f, podCfg) ginkgo.By(fmt.Sprintf("Verifying if the test-pod lands on node %q", nodeNames[1])) framework.ExpectEqual(nodeNames[1], testPod.Spec.NodeName) }) @@ -357,17 +354,17 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { }) // createBalancedPodForNodes creates a pod per node that asks for enough resources to make all nodes have the same mem/cpu usage ratio. -func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, ns string, nodes []v1.Node, requestedResource *v1.ResourceRequirements, ratio float64) error { - cleanUp := func() { +func createBalancedPodForNodes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string, nodes []v1.Node, requestedResource *v1.ResourceRequirements, ratio float64) error { + cleanUp := func(ctx context.Context) { // Delete all remaining pods - err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ + err := cs.CoreV1().Pods(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set(balancePodLabel)).String(), }) if err != nil { framework.Logf("Failed to delete memory balanced pods: %v.", err) } else { - err := wait.PollImmediate(2*time.Second, time.Minute, func() (bool, error) { - podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ + err := wait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute, func(ctx context.Context) (bool, error) { + podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set(balancePodLabel)).String(), }) if err != nil { @@ -392,7 +389,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n var memFractionMap = make(map[string]float64) // For each node, stores its pods info - nodeNameToPodList := podListForEachNode(cs) + nodeNameToPodList := podListForEachNode(ctx, cs) for _, node := range nodes { cpuFraction, memFraction, _, _ := computeCPUMemFraction(node, requestedResource, nodeNameToPodList[node.Name]) @@ -474,7 +471,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n return errors.NewAggregate(errs) } - nodeNameToPodList = podListForEachNode(cs) + nodeNameToPodList = podListForEachNode(ctx, cs) for _, node := range nodes { ginkgo.By("Compute Cpu, Mem Fraction after create balanced pods.") computeCPUMemFraction(node, requestedResource, nodeNameToPodList[node.Name]) @@ -483,9 +480,9 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n return nil } -func podListForEachNode(cs clientset.Interface) map[string][]*v1.Pod { +func podListForEachNode(ctx context.Context, cs clientset.Interface) map[string][]*v1.Pod { nodeNameToPodList := make(map[string][]*v1.Pod) - allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) + allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) if err != nil { framework.Failf("Expect error of invalid, got : %v", err) } @@ -557,7 +554,7 @@ func getRandomTaint() v1.Taint { } } -func addTaintToNode(cs clientset.Interface, nodeName string, testTaint v1.Taint) { - e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) +func addTaintToNode(ctx context.Context, cs 
clientset.Interface, nodeName string, testTaint v1.Taint) { + e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint) } diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index af27373864d..5f8c39e1dff 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -47,11 +47,11 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() { var zoneCount int var err error var zoneNames sets.String - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { cs := f.ClientSet if zoneCount <= 0 { - zoneNames, err = e2enode.GetSchedulableClusterZones(cs) + zoneNames, err = e2enode.GetSchedulableClusterZones(ctx, cs) framework.ExpectNoError(err) zoneCount = len(zoneNames) } @@ -60,26 +60,26 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() { e2eskipper.SkipUnlessAtLeast(zoneCount, 2, msg) // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread - e2enode.WaitForTotalHealthy(cs, time.Minute) - nodeList, err := e2enode.GetReadySchedulableNodes(cs) + e2enode.WaitForTotalHealthy(ctx, cs, time.Minute) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) // make the nodes have balanced cpu,mem usage - err = createBalancedPodForNodes(f, cs, f.Namespace.Name, nodeList.Items, podRequestedResource, 0.0) + err = createBalancedPodForNodes(ctx, f, cs, f.Namespace.Name, nodeList.Items, podRequestedResource, 0.0) framework.ExpectNoError(err) }) ginkgo.It("should spread the pods of a service across zones [Serial]", func(ctx context.Context) { - SpreadServiceOrFail(f, 5*zoneCount, zoneNames, imageutils.GetPauseImageName()) + SpreadServiceOrFail(ctx, f, 5*zoneCount, zoneNames, imageutils.GetPauseImageName()) }) ginkgo.It("should spread the pods of a replication controller across zones [Serial]", func(ctx context.Context) { - SpreadRCOrFail(f, int32(5*zoneCount), zoneNames, framework.ServeHostnameImage, []string{"serve-hostname"}) + SpreadRCOrFail(ctx, f, int32(5*zoneCount), zoneNames, framework.ServeHostnameImage, []string{"serve-hostname"}) }) }) // SpreadServiceOrFail check that the pods comprising a service // get spread evenly across available zones -func SpreadServiceOrFail(f *framework.Framework, replicaCount int, zoneNames sets.String, image string) { +func SpreadServiceOrFail(ctx context.Context, f *framework.Framework, replicaCount int, zoneNames sets.String, image string) { // First create the service serviceName := "test-service" serviceSpec := &v1.Service{ @@ -97,7 +97,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, zoneNames set }}, }, } - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, serviceSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) // Now create some pods behind the service @@ -124,11 +124,11 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, zoneNames set // Wait for all of them to be scheduled selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName})) - pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) + pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, selector) framework.ExpectNoError(err) // Now make sure they're spread across zones - checkZoneSpreading(f.ClientSet, pods, 
zoneNames.List()) + checkZoneSpreading(ctx, f.ClientSet, pods, zoneNames.List()) } // Find the name of the zone in which a Node is running @@ -143,16 +143,16 @@ func getZoneNameForNode(node v1.Node) (string, error) { } // Find the name of the zone in which the pod is scheduled -func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) { +func getZoneNameForPod(ctx context.Context, c clientset.Interface, pod v1.Pod) (string, error) { ginkgo.By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName)) - node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) return getZoneNameForNode(*node) } // Determine whether a set of pods are approximately evenly spread // across a given set of zones -func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []string) { +func checkZoneSpreading(ctx context.Context, c clientset.Interface, pods *v1.PodList, zoneNames []string) { podsPerZone := make(map[string]int) for _, zoneName := range zoneNames { podsPerZone[zoneName] = 0 @@ -161,7 +161,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str if pod.DeletionTimestamp != nil { continue } - zoneName, err := getZoneNameForPod(c, pod) + zoneName, err := getZoneNameForPod(ctx, c, pod) framework.ExpectNoError(err) podsPerZone[zoneName] = podsPerZone[zoneName] + 1 } @@ -182,10 +182,10 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str // SpreadRCOrFail Check that the pods comprising a replication // controller get spread evenly across available zones -func SpreadRCOrFail(f *framework.Framework, replicaCount int32, zoneNames sets.String, image string, args []string) { +func SpreadRCOrFail(ctx context.Context, f *framework.Framework, replicaCount int32, zoneNames sets.String, image string, args []string) { name := "ubelite-spread-rc-" + string(uuid.NewUUID()) ginkgo.By(fmt.Sprintf("Creating replication controller %s", name)) - controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), &v1.ReplicationController{ + controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, &v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ Namespace: f.Namespace.Name, Name: name, @@ -216,20 +216,20 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, zoneNames sets.S // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. - if err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil { + if err := e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, controller.Name); err != nil { framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } }() // List the pods, making sure we observe all the replicas. selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - _, err = e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount) + _, err = e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicaCount) framework.ExpectNoError(err) // Wait for all of them to be scheduled ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. 
Selector: %v", replicaCount, name, selector)) - pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) + pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, selector) framework.ExpectNoError(err) // Now make sure they're spread across zones - checkZoneSpreading(f.ClientSet, pods, zoneNames.List()) + checkZoneSpreading(ctx, f.ClientSet, pods, zoneNames.List()) } diff --git a/test/e2e/storage/csi_inline.go b/test/e2e/storage/csi_inline.go index c5b2cf065ae..a30e66763c2 100644 --- a/test/e2e/storage/csi_inline.go +++ b/test/e2e/storage/csi_inline.go @@ -82,33 +82,33 @@ var _ = utils.SIGDescribe("CSIInlineVolumes", func() { } ginkgo.By("creating") - createdDriver1, err := client.Create(context.TODO(), driver1, metav1.CreateOptions{}) + createdDriver1, err := client.Create(ctx, driver1, metav1.CreateOptions{}) framework.ExpectNoError(err) - createdDriver2, err := client.Create(context.TODO(), driver2, metav1.CreateOptions{}) + createdDriver2, err := client.Create(ctx, driver2, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = client.Create(context.TODO(), driver1, metav1.CreateOptions{}) + _, err = client.Create(ctx, driver1, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { framework.Failf("expected 409, got %#v", err) } ginkgo.By("getting") - retrievedDriver1, err := client.Get(context.TODO(), createdDriver1.Name, metav1.GetOptions{}) + retrievedDriver1, err := client.Get(ctx, createdDriver1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(retrievedDriver1.UID, createdDriver1.UID) - retrievedDriver2, err := client.Get(context.TODO(), createdDriver2.Name, metav1.GetOptions{}) + retrievedDriver2, err := client.Get(ctx, createdDriver2.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(retrievedDriver2.UID, createdDriver2.UID) ginkgo.By("listing") - driverList, err := client.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + driverList, err := client.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(driverList.Items), 2, "filtered list should have 2 items, got: %s", driverList) ginkgo.By("deleting") for _, driver := range driverList.Items { - err := client.Delete(context.TODO(), driver.Name, metav1.DeleteOptions{}) + err := client.Delete(ctx, driver.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - retrievedDriver, err := client.Get(context.TODO(), driver.Name, metav1.GetOptions{}) + retrievedDriver, err := client.Get(ctx, driver.Name, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): // Okay, normal case. 
@@ -188,32 +188,32 @@ var _ = utils.SIGDescribe("CSIInlineVolumes", func() { } ginkgo.By("creating") - createdPod, err := client.Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := client.Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = client.Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = client.Create(ctx, pod, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { framework.Failf("expected 409, got %#v", err) } ginkgo.By("getting") - retrievedPod, err := client.Get(context.TODO(), podName, metav1.GetOptions{}) + retrievedPod, err := client.Get(ctx, podName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(retrievedPod.UID, createdPod.UID) ginkgo.By("listing in namespace") - podList, err := client.List(context.TODO(), metav1.ListOptions{}) + podList, err := client.List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(len(podList.Items), 1, "list should have 1 items, got: %s", podList) ginkgo.By("patching") - patchedPod, err := client.Patch(context.TODO(), createdPod.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) + patchedPod, err := client.Patch(ctx, createdPod.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedPod.Annotations["patched"], "true", "patched object should have the applied annotation") ginkgo.By("deleting") - err = client.Delete(context.TODO(), createdPod.Name, metav1.DeleteOptions{}) + err = client.Delete(ctx, createdPod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - retrievedPod, err = client.Get(context.TODO(), createdPod.Name, metav1.GetOptions{}) + retrievedPod, err = client.Get(ctx, createdPod.Name, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): // Okay, normal case. diff --git a/test/e2e/storage/csi_mock/base.go b/test/e2e/storage/csi_mock/base.go index 0dd0ca3edc6..4f198097d2a 100644 --- a/test/e2e/storage/csi_mock/base.go +++ b/test/e2e/storage/csi_mock/base.go @@ -143,7 +143,7 @@ func newMockDriverSetup(f *framework.Framework) *mockDriverSetup { } } -func (m *mockDriverSetup) init(tp testParameters) { +func (m *mockDriverSetup) init(ctx context.Context, tp testParameters) { m.cs = m.f.ClientSet m.tp = tp @@ -190,7 +190,7 @@ func (m *mockDriverSetup) init(tp testParameters) { } m.driver = drivers.InitMockCSIDriver(driverOpts) - config := m.driver.PrepareTest(m.f) + config := m.driver.PrepareTest(ctx, m.f) m.config = config m.provisioner = config.GetUniqueDriverName() @@ -202,17 +202,17 @@ func (m *mockDriverSetup) init(tp testParameters) { // Wait for the CSIDriver actually get deployed and CSINode object to be generated. // This indicates the mock CSI driver pod is up and running healthy. 
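The pod exercised by the inline-volume test above embeds its volume directly in the pod spec rather than going through a PVC; that spec sits outside the hunk. As background, a CSI inline (ephemeral) volume is declared like this; the driver name and attributes are illustrative:

package example

import v1 "k8s.io/api/core/v1"

// inlineCSIVolume returns a pod volume backed directly by a CSI driver,
// i.e. an ephemeral "CSI inline" volume with no PVC or PV objects involved.
func inlineCSIVolume(name, driver string) v1.Volume {
	return v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			CSI: &v1.CSIVolumeSource{
				Driver: driver, // e.g. a per-test mock driver name
				VolumeAttributes: map[string]string{
					"size": "1Gi", // attribute keys are driver-specific; illustrative only
				},
			},
		},
	}
}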
- err = drivers.WaitForCSIDriverRegistrationOnNode(m.config.ClientNodeSelection.Name, m.config.GetUniqueDriverName(), m.cs) + err = drivers.WaitForCSIDriverRegistrationOnNode(ctx, m.config.ClientNodeSelection.Name, m.config.GetUniqueDriverName(), m.cs) framework.ExpectNoError(err, "Failed to register CSIDriver %v", m.config.GetUniqueDriverName()) } -func (m *mockDriverSetup) cleanup() { +func (m *mockDriverSetup) cleanup(ctx context.Context) { cs := m.f.ClientSet var errs []error for _, pod := range m.pods { ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) - errs = append(errs, e2epod.DeletePodWithWait(cs, pod)) + errs = append(errs, e2epod.DeletePodWithWait(ctx, cs, pod)) } for _, claim := range m.pvcs { @@ -223,7 +223,7 @@ func (m *mockDriverSetup) cleanup() { errs = append(errs, err) } if claim.Spec.VolumeName != "" { - errs = append(errs, e2epv.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute)) + errs = append(errs, e2epv.WaitForPersistentVolumeDeleted(ctx, cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute)) } } } @@ -242,11 +242,11 @@ func (m *mockDriverSetup) cleanup() { framework.ExpectNoError(err, "while cleaning up after test") } -func (m *mockDriverSetup) createPod(withVolume volumeType) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) { +func (m *mockDriverSetup) createPod(ctx context.Context, withVolume volumeType) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) { ginkgo.By("Creating pod") f := m.f - sc := m.driver.GetDynamicProvisionStorageClass(m.config, "") + sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "") scTest := testsuites.StorageClassTest{ Name: m.driver.GetDriverInfo().Name, Timeouts: f.Timeouts, @@ -275,7 +275,7 @@ func (m *mockDriverSetup) createPod(withVolume volumeType) (class *storagev1.Sto }, } case pvcReference: - class, claim, pod = startPausePod(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name) + class, claim, pod = startPausePod(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name) if class != nil { m.sc[class.Name] = class } @@ -300,12 +300,12 @@ func (m *mockDriverSetup) createPodWithPVC(pvc *v1.PersistentVolumeClaim) (*v1.P return pod, err } -func (m *mockDriverSetup) createPodWithFSGroup(fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { +func (m *mockDriverSetup) createPodWithFSGroup(ctx context.Context, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { f := m.f ginkgo.By("Creating pod with fsGroup") nodeSelection := m.config.ClientNodeSelection - sc := m.driver.GetDynamicProvisionStorageClass(m.config, "") + sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "") scTest := testsuites.StorageClassTest{ Name: m.driver.GetDriverInfo().Name, Provisioner: sc.Provisioner, @@ -315,7 +315,7 @@ func (m *mockDriverSetup) createPodWithFSGroup(fsGroup *int64) (*storagev1.Stora DelayBinding: m.tp.lateBinding, AllowVolumeExpansion: m.tp.enableResizing, } - class, claim, pod := startBusyBoxPod(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup) + class, claim, pod := startBusyBoxPod(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup) if class != nil { m.sc[class.Name] = class @@ -331,11 +331,11 @@ func (m *mockDriverSetup) createPodWithFSGroup(fsGroup *int64) (*storagev1.Stora return class, claim, pod } -func (m *mockDriverSetup) createPodWithSELinux(accessModes 
[]v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { +func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { ginkgo.By("Creating pod with SELinux context") f := m.f nodeSelection := m.config.ClientNodeSelection - sc := m.driver.GetDynamicProvisionStorageClass(m.config, "") + sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "") scTest := testsuites.StorageClassTest{ Name: m.driver.GetDriverInfo().Name, Provisioner: sc.Provisioner, @@ -346,7 +346,7 @@ func (m *mockDriverSetup) createPodWithSELinux(accessModes []v1.PersistentVolume AllowVolumeExpansion: m.tp.enableResizing, MountOptions: mountOptions, } - class, claim := createClaim(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes) + class, claim := createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes) pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts) framework.ExpectNoError(err, "Failed to create pause pod with SELinux context %s: %v", seLinuxOpts, err) @@ -474,7 +474,7 @@ func createSC(cs clientset.Interface, t testsuites.StorageClassTest, scName, ns return class } -func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, accessModes []v1.PersistentVolumeAccessMode) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) { +func createClaim(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, accessModes []v1.PersistentVolumeAccessMode) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) { class := createSC(cs, t, scName, ns) claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ ClaimSize: t.ClaimSize, @@ -487,22 +487,22 @@ func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2e if !t.DelayBinding { pvcClaims := []*v1.PersistentVolumeClaim{claim} - _, err = e2epv.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout) + _, err = e2epv.WaitForPVClaimBoundPhase(ctx, cs, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound: %v", err) } return class, claim } -func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { - class, claim := createClaim(cs, t, node, scName, ns, nil) +func startPausePod(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { + class, claim := createClaim(ctx, cs, t, node, scName, ns, nil) pod, err := startPausePodWithClaim(cs, claim, node, ns) framework.ExpectNoError(err, "Failed to create pause pod: %v", err) return class, claim, pod } -func startBusyBoxPod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { - class, claim := createClaim(cs, t, node, scName, ns, nil) +func startBusyBoxPod(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node 
e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { + class, claim := createClaim(ctx, cs, t, node, scName, ns, nil) pod, err := startBusyBoxPodWithClaim(cs, claim, node, ns, fsGroup) framework.ExpectNoError(err, "Failed to create busybox pod: %v", err) return class, claim, pod @@ -668,7 +668,7 @@ func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentV return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) } -func checkPodLogs(getCalls func() ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool, expectedNumNodePublish int) error { +func checkPodLogs(ctx context.Context, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool, expectedNumNodePublish int) error { expectedAttributes := map[string]string{} if expectPodInfo { expectedAttributes["csi.storage.k8s.io/pod.name"] = pod.Name @@ -690,7 +690,7 @@ func checkPodLogs(getCalls func() ([]drivers.MockCSICall, error), pod *v1.Pod, e foundAttributes := sets.NewString() numNodePublishVolume := 0 numNodeUnpublishVolume := 0 - calls, err := getCalls() + calls, err := getCalls(ctx) if err != nil { return err } @@ -776,8 +776,8 @@ func createPreHook(method string, callback func(counter int64) error) *drivers.H // // Only permanent errors are returned. Other errors are logged and no // calls are returned. The caller is expected to retry. -func compareCSICalls(trackedCalls []string, expectedCallSequence []csiCall, getCalls func() ([]drivers.MockCSICall, error)) ([]drivers.MockCSICall, int, error) { - allCalls, err := getCalls() +func compareCSICalls(ctx context.Context, trackedCalls []string, expectedCallSequence []csiCall, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error)) ([]drivers.MockCSICall, int, error) { + allCalls, err := getCalls(ctx) if err != nil { framework.Logf("intermittent (?) 
log retrieval error, proceeding without output: %v", err) return nil, 0, nil diff --git a/test/e2e/storage/csi_mock/csi_attach_volume.go b/test/e2e/storage/csi_mock/csi_attach_volume.go index 5fe1d69fa30..ae6edcf3cfb 100644 --- a/test/e2e/storage/csi_mock/csi_attach_volume.go +++ b/test/e2e/storage/csi_mock/csi_attach_volume.go @@ -71,23 +71,23 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() { test := t ginkgo.It(t.name, func(ctx context.Context) { var err error - m.init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach}) + m.init(ctx, testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach}) ginkgo.DeferCleanup(m.cleanup) volumeType := test.volumeType if volumeType == "" { volumeType = pvcReference } - _, claim, pod := m.createPod(volumeType) + _, claim, pod := m.createPod(ctx, volumeType) if pod == nil { return } - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod: %v", err) ginkgo.By("Checking if VolumeAttachment was created for the pod") testConfig := storageframework.ConvertTestConfig(m.config) - attachmentName := e2evolume.GetVolumeAttachmentName(m.cs, testConfig, m.provisioner, claim.Name, claim.Namespace) + attachmentName := e2evolume.GetVolumeAttachmentName(ctx, m.cs, testConfig, m.provisioner, claim.Name, claim.Namespace) _, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { @@ -109,10 +109,10 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() { ginkgo.Context("CSI CSIDriver deployment after pod creation using non-attachable mock driver", func() { ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func(ctx context.Context) { var err error - m.init(testParameters{registerDriver: false, disableAttach: true}) + m.init(ctx, testParameters{registerDriver: false, disableAttach: true}) ginkgo.DeferCleanup(m.cleanup) - _, claim, pod := m.createPod(pvcReference) // late binding as specified above + _, claim, pod := m.createPod(ctx, pvcReference) // late binding as specified above if pod == nil { return } @@ -126,9 +126,9 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() { }.AsSelector().String() msg := "AttachVolume.Attach failed for volume" - err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, f.Timeouts.PodStart) + err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod.Namespace, eventSelector, msg, f.Timeouts.PodStart) if err != nil { - podErr := e2epod.WaitTimeoutForPodRunningInNamespace(m.cs, pod.Name, pod.Namespace, 10*time.Second) + podErr := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace, 10*time.Second) framework.ExpectError(podErr, "Pod should not be in running status because attaching should failed") // Events are unreliable, don't depend on the event. It's used only to speed up the test. 
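Registering the now context-aware m.cleanup via ginkgo.DeferCleanup keeps working because Ginkgo passes a context to deferred cleanup callbacks whose first parameter is a context.Context (or SpecContext). A small sketch of that mechanism, with illustrative helper names:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteTestPod is a cleanup helper; because its first parameter is a
// context.Context, ginkgo.DeferCleanup supplies a context when it runs the
// cleanup at the end of the spec (or when the spec is interrupted).
func deleteTestPod(ctx context.Context, cs kubernetes.Interface, ns, name string) {
	_ = cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
}

func registerSpecWithCleanup(cs kubernetes.Interface, ns string) {
	ginkgo.It("cleans up with a context", func(ctx context.Context) {
		// ... create a pod named "example" here ...
		ginkgo.DeferCleanup(deleteTestPod, cs, ns, "example")
	})
}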
framework.Logf("Attach should fail and the corresponding event should show up, error: %v", err) @@ -137,7 +137,7 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() { // VolumeAttachment should be created because the default value for CSI attachable is true ginkgo.By("Checking if VolumeAttachment was created for the pod") testConfig := storageframework.ConvertTestConfig(m.config) - attachmentName := e2evolume.GetVolumeAttachmentName(m.cs, testConfig, m.provisioner, claim.Name, claim.Namespace) + attachmentName := e2evolume.GetVolumeAttachmentName(ctx, m.cs, testConfig, m.provisioner, claim.Name, claim.Namespace) _, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { @@ -156,7 +156,7 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() { NewDriverName: "csi-mock-" + f.UniqueName, CanAttach: &canAttach, } - err = utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error { + err = utils.CreateFromManifests(ctx, f, driverNamespace, func(item interface{}) error { return utils.PatchCSIDeployment(f, o, item) }, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml") if err != nil { @@ -164,13 +164,13 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() { } ginkgo.By("Wait for the pod in running status") - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod: %v", err) ginkgo.By(fmt.Sprintf("Wait for the volumeattachment to be deleted up to %v", csiVolumeAttachmentTimeout)) // This step can be slow because we have to wait either a NodeUpdate event happens or // the detachment for this volume timeout so that we can do a force detach. 
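The eventSelector used in this test is assembled from a fields.Set over the event's involvedObject (the exact set is outside the hunk). For reference, listing the events belonging to a pod with such a field selector looks roughly like this; podEvents and the chosen fields are illustrative:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// podEvents lists events whose involved object is the given pod, using the
// caller's ctx so the call is abandoned when the spec is interrupted.
func podEvents(ctx context.Context, cs kubernetes.Interface, ns, podName string) (*v1.EventList, error) {
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.name":      podName,
		"involvedObject.namespace": ns,
	}.AsSelector().String()
	return cs.CoreV1().Events(ns).List(ctx, metav1.ListOptions{FieldSelector: selector})
}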
- err = e2evolume.WaitForVolumeAttachmentTerminated(attachmentName, m.cs, csiVolumeAttachmentTimeout) + err = e2evolume.WaitForVolumeAttachmentTerminated(ctx, attachmentName, m.cs, csiVolumeAttachmentTimeout) framework.ExpectNoError(err, "Failed to delete VolumeAttachment: %v", err) }) }) diff --git a/test/e2e/storage/csi_mock/csi_fsgroup_mount.go b/test/e2e/storage/csi_mock/csi_fsgroup_mount.go index bade243bc9e..837acf1ff90 100644 --- a/test/e2e/storage/csi_mock/csi_fsgroup_mount.go +++ b/test/e2e/storage/csi_mock/csi_fsgroup_mount.go @@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("CSI Mock fsgroup as mount option", func() { if framework.NodeOSDistroIs("windows") { e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping") } - m.init(testParameters{ + m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, enableVolumeMountGroup: t.enableVolumeMountGroup, @@ -67,9 +67,9 @@ var _ = utils.SIGDescribe("CSI Mock fsgroup as mount option", func() { fsGroup := &fsGroupVal fsGroupStr := strconv.FormatInt(fsGroupVal, 10 /* base */) - _, _, pod := m.createPodWithFSGroup(fsGroup) /* persistent volume */ + _, _, pod := m.createPodWithFSGroup(ctx, fsGroup) /* persistent volume */ - err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "failed to start pod") if t.enableVolumeMountGroup { diff --git a/test/e2e/storage/csi_mock/csi_fsgroup_policy.go b/test/e2e/storage/csi_mock/csi_fsgroup_policy.go index 65d157ace4f..f57897856b1 100644 --- a/test/e2e/storage/csi_mock/csi_fsgroup_policy.go +++ b/test/e2e/storage/csi_mock/csi_fsgroup_policy.go @@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("CSI Mock volume fsgroup policies", func() { if framework.NodeOSDistroIs("windows") { e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping") } - m.init(testParameters{ + m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, fsGroupPolicy: &test.fsGroupPolicy, @@ -82,13 +82,13 @@ var _ = utils.SIGDescribe("CSI Mock volume fsgroup policies", func() { fsGroupVal := int64(rand.Int63n(20000) + 1024) fsGroup := &fsGroupVal - _, _, pod := m.createPodWithFSGroup(fsGroup) /* persistent volume */ + _, _, pod := m.createPodWithFSGroup(ctx, fsGroup) /* persistent volume */ mountPath := pod.Spec.Containers[0].VolumeMounts[0].MountPath dirName := mountPath + "/" + f.UniqueName fileName := dirName + "/" + f.UniqueName - err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "failed to start pod") // Create the subdirectory to ensure that fsGroup propagates diff --git a/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go b/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go index 09b71dd973f..bb775b6eb72 100644 --- a/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go +++ b/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go @@ -138,19 +138,19 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() { if test.nodeStageHook != nil { hooks = createPreHook("NodeStageVolume", test.nodeStageHook) } - m.init(testParameters{ + m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, hooks: hooks, }) ginkgo.DeferCleanup(m.cleanup) - _, claim, pod := m.createPod(pvcReference) + _, claim, pod := m.createPod(ctx, pvcReference) if pod == nil { return } // Wait for PVC to get 
bound to make sure the CSI driver is fully started. - err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, f.ClientSet, f.Namespace.Name, claim.Name, time.Second, framework.ClaimProvisionTimeout) + err := e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, f.ClientSet, f.Namespace.Name, claim.Name, time.Second, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "while waiting for PVC to get provisioned") ginkgo.By("Waiting for expected CSI calls") @@ -162,7 +162,7 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() { framework.Failf("timed out waiting for the CSI call that indicates that the pod can be deleted: %v", test.expectedCalls) } time.Sleep(1 * time.Second) - _, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.driver.GetCalls) + _, index, err := compareCSICalls(ctx, trackedCalls, test.expectedCalls, m.driver.GetCalls) framework.ExpectNoError(err, "while waiting for initial CSI calls") if index == 0 { // No CSI call received yet @@ -176,17 +176,17 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() { if test.expectPodRunning { ginkgo.By("Waiting for pod to be running") - err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod: %v", err) } ginkgo.By("Deleting the previously created pod") - err = e2epod.DeletePodWithWait(m.cs, pod) + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) framework.ExpectNoError(err, "while deleting") ginkgo.By("Waiting for all remaining expected CSI calls") err = wait.Poll(time.Second, csiUnstageWaitTimeout, func() (done bool, err error) { - _, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.driver.GetCalls) + _, index, err := compareCSICalls(ctx, trackedCalls, test.expectedCalls, m.driver.GetCalls) if err != nil { return true, err } @@ -276,39 +276,39 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() { return test.nodeUnstageHook(counter, pod) }) } - m.init(testParameters{ + m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, hooks: hooks, }) ginkgo.DeferCleanup(m.cleanup) - _, claim, pod := m.createPod(pvcReference) + _, claim, pod := m.createPod(ctx, pvcReference) if pod == nil { return } // Wait for PVC to get bound to make sure the CSI driver is fully started. 
- err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, f.ClientSet, f.Namespace.Name, claim.Name, time.Second, framework.ClaimProvisionTimeout) + err := e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, f.ClientSet, f.Namespace.Name, claim.Name, time.Second, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "while waiting for PVC to get provisioned") - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "while waiting for the first pod to start") - err = e2epod.DeletePodWithWait(m.cs, pod) + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) framework.ExpectNoError(err, "while deleting the first pod") // Create the second pod pod, err = m.createPodWithPVC(claim) framework.ExpectNoError(err, "while creating the second pod") - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "while waiting for the second pod to start") // The second pod is running and kubelet can't call NodeUnstage of the first one. // Therefore incrementing the pod counter is safe here. atomic.AddInt64(&deletedPodNumber, 1) - err = e2epod.DeletePodWithWait(m.cs, pod) + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) framework.ExpectNoError(err, "while deleting the second pod") ginkgo.By("Waiting for all remaining expected CSI calls") err = wait.Poll(time.Second, csiUnstageWaitTimeout, func() (done bool, err error) { - _, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.driver.GetCalls) + _, index, err := compareCSICalls(ctx, trackedCalls, test.expectedCalls, m.driver.GetCalls) if err != nil { return true, err } diff --git a/test/e2e/storage/csi_mock/csi_selinux_mount.go b/test/e2e/storage/csi_mock/csi_selinux_mount.go index 743c9bfc896..d6c6e4265d1 100644 --- a/test/e2e/storage/csi_mock/csi_selinux_mount.go +++ b/test/e2e/storage/csi_mock/csi_selinux_mount.go @@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping") } var nodeStageMountOpts, nodePublishMountOpts []string - m.init(testParameters{ + m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, enableSELinuxMount: &t.seLinuxEnabled, @@ -110,8 +110,8 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { podSELinuxOpts = &seLinuxOpts } - _, _, pod := m.createPodWithSELinux(accessModes, t.mountOptions, podSELinuxOpts) - err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + _, _, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, podSELinuxOpts) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "failed to start pod") framework.ExpectEqual(nodeStageMountOpts, t.expectedMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume. 
to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts) diff --git a/test/e2e/storage/csi_mock/csi_service_account_token.go b/test/e2e/storage/csi_mock/csi_service_account_token.go index 53811ab9881..60e36fcbeb6 100644 --- a/test/e2e/storage/csi_mock/csi_service_account_token.go +++ b/test/e2e/storage/csi_mock/csi_service_account_token.go @@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("CSI Mock volume service account token", func() { test := test csiServiceAccountTokenEnabled := test.tokenRequests != nil ginkgo.It(test.desc, func(ctx context.Context) { - m.init(testParameters{ + m.init(ctx, testParameters{ registerDriver: test.deployCSIDriverObject, tokenRequests: test.tokenRequests, requiresRepublish: &csiServiceAccountTokenEnabled, @@ -71,11 +71,11 @@ var _ = utils.SIGDescribe("CSI Mock volume service account token", func() { ginkgo.DeferCleanup(m.cleanup) - _, _, pod := m.createPod(pvcReference) + _, _, pod := m.createPod(ctx, pvcReference) if pod == nil { return } - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod: %v", err) // sleep to make sure RequiresRepublish triggers more than 1 NodePublishVolume @@ -86,11 +86,11 @@ var _ = utils.SIGDescribe("CSI Mock volume service account token", func() { } ginkgo.By("Deleting the previously created pod") - err = e2epod.DeletePodWithWait(m.cs, pod) + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) framework.ExpectNoError(err, "while deleting") ginkgo.By("Checking CSI driver logs") - err = checkPodLogs(m.driver.GetCalls, pod, false, false, false, test.deployCSIDriverObject && csiServiceAccountTokenEnabled, numNodePublishVolume) + err = checkPodLogs(ctx, m.driver.GetCalls, pod, false, false, false, test.deployCSIDriverObject && csiServiceAccountTokenEnabled, numNodePublishVolume) framework.ExpectNoError(err) }) } diff --git a/test/e2e/storage/csi_mock/csi_snapshot.go b/test/e2e/storage/csi_mock/csi_snapshot.go index f4b97e02c80..2b720a4e4fb 100644 --- a/test/e2e/storage/csi_mock/csi_snapshot.go +++ b/test/e2e/storage/csi_mock/csi_snapshot.go @@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { if test.createSnapshotHook != nil { hooks = createPreHook("CreateSnapshot", test.createSnapshotHook) } - m.init(testParameters{ + m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, enableSnapshot: true, @@ -82,7 +82,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { defer cancel() ginkgo.DeferCleanup(m.cleanup) - sc := m.driver.GetDynamicProvisionStorageClass(m.config, "") + sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "") ginkgo.By("Creating storage class") class, err := m.cs.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create class: %v", err) @@ -96,7 +96,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { ginkgo.By("Creating snapshot") // TODO: Test VolumeSnapshots with Retain policy parameters := map[string]string{} - snapshotClass, snapshot := storageframework.CreateSnapshot(sDriver, m.config, storageframework.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts, parameters) + snapshotClass, snapshot := storageframework.CreateSnapshot(ctx, sDriver, m.config, storageframework.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts, parameters) framework.ExpectNoError(err, "failed to create snapshot") 
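[Editor's note — illustrative sketch, not part of the patch: the basic pattern applied throughout the mock-driver hunks above. The spec body receives ctx from Ginkgo and threads it into every framework helper, so an aborted test cancels its API calls and waits immediately. Identifiers m, testParameters and pvcReference are assumed from the surrounding csi_mock tests.]

ginkgo.It("passes the Ginkgo context to framework helpers", func(ctx context.Context) {
	m.init(ctx, testParameters{registerDriver: true})
	ginkgo.DeferCleanup(m.cleanup)

	// Helpers take ctx as their first argument instead of building context.TODO() internally.
	_, _, pod := m.createPod(ctx, pvcReference)
	err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
	framework.ExpectNoError(err, "failed to start pod")

	err = e2epod.DeletePodWithWait(ctx, m.cs, pod)
	framework.ExpectNoError(err, "while deleting")
})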
m.vsc[snapshotClass.GetName()] = snapshotClass volumeSnapshotName := snapshot.GetName() @@ -110,11 +110,11 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { framework.ExpectNoError(err) ginkgo.By("Wait for PVC to be Bound") - _, err = e2epv.WaitForPVClaimBoundPhase(m.cs, []*v1.PersistentVolumeClaim{claim}, 1*time.Minute) + _, err = e2epv.WaitForPVClaimBoundPhase(ctx, m.cs, []*v1.PersistentVolumeClaim{claim}, 1*time.Minute) framework.ExpectNoError(err, "Failed to create claim: %v", err) ginkgo.By(fmt.Sprintf("Delete PVC %s", claim.Name)) - err = e2epv.DeletePersistentVolumeClaim(m.cs, claim.Name, claim.Namespace) + err = e2epv.DeletePersistentVolumeClaim(ctx, m.cs, claim.Name, claim.Namespace) framework.ExpectNoError(err, "failed to delete pvc") ginkgo.By("Get PVC from API server and verify deletion timestamp is set") @@ -129,7 +129,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { } ginkgo.By(fmt.Sprintf("Get VolumeSnapshotContent bound to VolumeSnapshot %s", snapshot.GetName())) - snapshotContent := utils.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot, f.Timeouts.SnapshotCreate) + snapshotContent := utils.GetSnapshotContentFromSnapshot(ctx, m.config.Framework.DynamicClient, snapshot, f.Timeouts.SnapshotCreate) volumeSnapshotContentName := snapshotContent.GetName() ginkgo.By(fmt.Sprintf("Verify VolumeSnapshotContent %s contains finalizer %s", snapshot.GetName(), volumeSnapshotContentFinalizer)) @@ -153,7 +153,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { // Refer https://github.com/kubernetes/kubernetes/pull/99167#issuecomment-781670012 if claim != nil && claim.Spec.VolumeName != "" { ginkgo.By(fmt.Sprintf("Wait for PV %s to be deleted", claim.Spec.VolumeName)) - err = e2epv.WaitForPersistentVolumeDeleted(m.cs, claim.Spec.VolumeName, framework.Poll, 3*time.Minute) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, m.cs, claim.Spec.VolumeName, framework.Poll, 3*time.Minute) framework.ExpectNoError(err, fmt.Sprintf("failed to delete PV %s", claim.Spec.VolumeName)) } @@ -162,11 +162,11 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { framework.ExpectNoError(err) ginkgo.By("Delete VolumeSnapshot") - err = utils.DeleteAndWaitSnapshot(m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout) + err = utils.DeleteAndWaitSnapshot(ctx, m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout) framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshot %s", volumeSnapshotName)) ginkgo.By(fmt.Sprintf("Wait for VolumeSnapshotContent %s to be deleted", volumeSnapshotContentName)) - err = utils.WaitForGVRDeletion(m.config.Framework.DynamicClient, utils.SnapshotContentGVR, volumeSnapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout) + err = utils.WaitForGVRDeletion(ctx, m.config.Framework.DynamicClient, utils.SnapshotContentGVR, volumeSnapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout) framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshotContent %s", volumeSnapshotContentName)) }) } @@ -208,7 +208,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { test := test ginkgo.It(test.name, func(ctx context.Context) { hooks := createPreHook("CreateSnapshot", test.createSnapshotHook) - m.init(testParameters{ + m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, enableSnapshot: true, @@ -223,7 
+223,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { var sc *storagev1.StorageClass if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok { - sc = dDriver.GetDynamicProvisionStorageClass(m.config, "") + sc = dDriver.GetDynamicProvisionStorageClass(ctx, m.config, "") } ginkgo.By("Creating storage class") class, err := m.cs.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) @@ -239,7 +239,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { framework.ExpectNoError(err, "Failed to create claim: %v", err) ginkgo.By("Wait for PVC to be Bound") - _, err = e2epv.WaitForPVClaimBoundPhase(m.cs, []*v1.PersistentVolumeClaim{pvc}, 1*time.Minute) + _, err = e2epv.WaitForPVClaimBoundPhase(ctx, m.cs, []*v1.PersistentVolumeClaim{pvc}, 1*time.Minute) framework.ExpectNoError(err, "Failed to create claim: %v", err) m.pvcs = append(m.pvcs, pvc) @@ -265,9 +265,9 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { CSISnapshotterSecretNamespaceAnnotation: f.Namespace.Name, } - _, snapshot := storageframework.CreateSnapshot(sDriver, m.config, storageframework.DynamicSnapshotDelete, pvc.Name, pvc.Namespace, f.Timeouts, parameters) + _, snapshot := storageframework.CreateSnapshot(ctx, sDriver, m.config, storageframework.DynamicSnapshotDelete, pvc.Name, pvc.Namespace, f.Timeouts, parameters) framework.ExpectNoError(err, "failed to create snapshot") - snapshotcontent := utils.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot, f.Timeouts.SnapshotCreate) + snapshotcontent := utils.GetSnapshotContentFromSnapshot(ctx, m.config.Framework.DynamicClient, snapshot, f.Timeouts.SnapshotCreate) if annotations, ok = snapshotcontent.Object["metadata"].(map[string]interface{})["annotations"]; !ok { framework.Failf("Unable to get volume snapshot content annotations") } @@ -298,7 +298,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { for _, test := range tests { test := test ginkgo.It(test.name, func(ctx context.Context) { - m.init(testParameters{ + m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, enableSnapshot: true, @@ -310,13 +310,13 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { } ginkgo.DeferCleanup(m.cleanup) - metricsGrabber, err := e2emetrics.NewMetricsGrabber(m.config.Framework.ClientSet, nil, f.ClientConfig(), false, false, false, false, false, true) + metricsGrabber, err := e2emetrics.NewMetricsGrabber(ctx, m.config.Framework.ClientSet, nil, f.ClientConfig(), false, false, false, false, false, true) if err != nil { framework.Failf("Error creating metrics grabber : %v", err) } // Grab initial metrics - if this fails, snapshot controller metrics are not setup. Skip in this case. 
- _, err = metricsGrabber.GrabFromSnapshotController(framework.TestContext.SnapshotControllerPodName, framework.TestContext.SnapshotControllerHTTPPort) + _, err = metricsGrabber.GrabFromSnapshotController(ctx, framework.TestContext.SnapshotControllerPodName, framework.TestContext.SnapshotControllerHTTPPort) if err != nil { e2eskipper.Skipf("Snapshot controller metrics not found -- skipping") } @@ -330,19 +330,19 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { "", test.pattern) createSnapshotMetrics := newSnapshotControllerMetrics(metricsTestConfig, metricsGrabber) - originalCreateSnapshotCount, _ := createSnapshotMetrics.getSnapshotControllerMetricValue() + originalCreateSnapshotCount, _ := createSnapshotMetrics.getSnapshotControllerMetricValue(ctx) metricsTestConfig.operationName = "CreateSnapshotAndReady" createSnapshotAndReadyMetrics := newSnapshotControllerMetrics(metricsTestConfig, metricsGrabber) - originalCreateSnapshotAndReadyCount, _ := createSnapshotAndReadyMetrics.getSnapshotControllerMetricValue() + originalCreateSnapshotAndReadyCount, _ := createSnapshotAndReadyMetrics.getSnapshotControllerMetricValue(ctx) metricsTestConfig.operationName = "DeleteSnapshot" deleteSnapshotMetrics := newSnapshotControllerMetrics(metricsTestConfig, metricsGrabber) - originalDeleteSnapshotCount, _ := deleteSnapshotMetrics.getSnapshotControllerMetricValue() + originalDeleteSnapshotCount, _ := deleteSnapshotMetrics.getSnapshotControllerMetricValue(ctx) ginkgo.By("Creating storage class") var sc *storagev1.StorageClass if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok { - sc = dDriver.GetDynamicProvisionStorageClass(m.config, "") + sc = dDriver.GetDynamicProvisionStorageClass(ctx, m.config, "") } class, err := m.cs.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create storage class: %v", err) @@ -357,28 +357,28 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { framework.ExpectNoError(err, "Failed to create claim: %v", err) ginkgo.By("Wait for PVC to be Bound") - _, err = e2epv.WaitForPVClaimBoundPhase(m.cs, []*v1.PersistentVolumeClaim{pvc}, 1*time.Minute) + _, err = e2epv.WaitForPVClaimBoundPhase(ctx, m.cs, []*v1.PersistentVolumeClaim{pvc}, 1*time.Minute) framework.ExpectNoError(err, "Failed to create claim: %v", err) ginkgo.By("Creating snapshot") parameters := map[string]string{} - sr := storageframework.CreateSnapshotResource(sDriver, m.config, test.pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters) + sr := storageframework.CreateSnapshotResource(ctx, sDriver, m.config, test.pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters) framework.ExpectNoError(err, "failed to create snapshot") ginkgo.By("Checking for CreateSnapshot metrics") - createSnapshotMetrics.waitForSnapshotControllerMetric(originalCreateSnapshotCount+1.0, f.Timeouts.SnapshotControllerMetrics) + createSnapshotMetrics.waitForSnapshotControllerMetric(ctx, originalCreateSnapshotCount+1.0, f.Timeouts.SnapshotControllerMetrics) ginkgo.By("Checking for CreateSnapshotAndReady metrics") - err = utils.WaitForSnapshotReady(m.config.Framework.DynamicClient, pvc.Namespace, sr.Vs.GetName(), framework.Poll, f.Timeouts.SnapshotCreate) + err = utils.WaitForSnapshotReady(ctx, m.config.Framework.DynamicClient, pvc.Namespace, sr.Vs.GetName(), framework.Poll, f.Timeouts.SnapshotCreate) framework.ExpectNoError(err, "failed to wait for snapshot ready") - 
createSnapshotAndReadyMetrics.waitForSnapshotControllerMetric(originalCreateSnapshotAndReadyCount+1.0, f.Timeouts.SnapshotControllerMetrics) + createSnapshotAndReadyMetrics.waitForSnapshotControllerMetric(ctx, originalCreateSnapshotAndReadyCount+1.0, f.Timeouts.SnapshotControllerMetrics) // delete the snapshot and check if the snapshot is deleted deleteSnapshot(m.cs, m.config, sr.Vs) ginkgo.By("check for delete metrics") metricsTestConfig.operationName = "DeleteSnapshot" - deleteSnapshotMetrics.waitForSnapshotControllerMetric(originalDeleteSnapshotCount+1.0, f.Timeouts.SnapshotControllerMetrics) + deleteSnapshotMetrics.waitForSnapshotControllerMetric(ctx, originalDeleteSnapshotCount+1.0, f.Timeouts.SnapshotControllerMetrics) }) } }) @@ -489,11 +489,11 @@ func newSnapshotControllerMetrics(cfg snapshotMetricsTestConfig, metricsGrabber } } -func (scm *snapshotControllerMetrics) waitForSnapshotControllerMetric(expectedValue float64, timeout time.Duration) { +func (scm *snapshotControllerMetrics) waitForSnapshotControllerMetric(ctx context.Context, expectedValue float64, timeout time.Duration) { metricKey := scm.getMetricKey() if successful := utils.WaitUntil(10*time.Second, timeout, func() bool { // get metric value - actualValue, err := scm.getSnapshotControllerMetricValue() + actualValue, err := scm.getSnapshotControllerMetricValue(ctx) if err != nil { return false } @@ -513,11 +513,11 @@ func (scm *snapshotControllerMetrics) waitForSnapshotControllerMetric(expectedVa framework.Failf("Unable to get valid snapshot controller metrics after %v", timeout) } -func (scm *snapshotControllerMetrics) getSnapshotControllerMetricValue() (float64, error) { +func (scm *snapshotControllerMetrics) getSnapshotControllerMetricValue(ctx context.Context) (float64, error) { metricKey := scm.getMetricKey() // grab and parse into readable format - err := scm.grabSnapshotControllerMetrics() + err := scm.grabSnapshotControllerMetrics(ctx) if err != nil { return 0, err } @@ -556,9 +556,9 @@ func (scm *snapshotControllerMetrics) showMetricsFailure(metricKey string) { } } -func (scm *snapshotControllerMetrics) grabSnapshotControllerMetrics() error { +func (scm *snapshotControllerMetrics) grabSnapshotControllerMetrics(ctx context.Context) error { // pull all metrics - metrics, err := scm.metricsGrabber.GrabFromSnapshotController(framework.TestContext.SnapshotControllerPodName, framework.TestContext.SnapshotControllerHTTPPort) + metrics, err := scm.metricsGrabber.GrabFromSnapshotController(ctx, framework.TestContext.SnapshotControllerPodName, framework.TestContext.SnapshotControllerHTTPPort) if err != nil { return err } diff --git a/test/e2e/storage/csi_mock/csi_storage_capacity.go b/test/e2e/storage/csi_mock/csi_storage_capacity.go index fa09628198f..b3ed662708e 100644 --- a/test/e2e/storage/csi_mock/csi_storage_capacity.go +++ b/test/e2e/storage/csi_mock/csi_storage_capacity.go @@ -126,7 +126,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { }) } - m.init(params) + m.init(ctx, params) ginkgo.DeferCleanup(m.cleanup) ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout) @@ -148,7 +148,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { framework.ExpectNoError(err, "create PVC watch") defer pvcWatch.Stop() - sc, claim, pod := m.createPod(pvcReference) + sc, claim, pod := m.createPod(ctx, pvcReference) gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod") bindingMode := storagev1.VolumeBindingImmediate if test.lateBinding { @@ -156,9 +156,9 @@ var _ = 
utils.SIGDescribe("CSI Mock volume storage capacity", func() { } framework.ExpectEqual(*sc.VolumeBindingMode, bindingMode, "volume binding mode") - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "failed to start pod") - err = e2epod.DeletePodWithWait(m.cs, pod) + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) framework.ExpectNoError(err, "failed to delete pod") err = m.cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete claim") @@ -180,8 +180,8 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { } var calls []drivers.MockCSICall - err = wait.PollImmediateUntil(time.Second, func() (done bool, err error) { - c, index, err := compareCSICalls(deterministicCalls, expected, m.driver.GetCalls) + err = wait.PollImmediateUntilWithContext(ctx, time.Second, func(ctx context.Context) (done bool, err error) { + c, index, err := compareCSICalls(ctx, deterministicCalls, expected, m.driver.GetCalls) if err != nil { return true, fmt.Errorf("error waiting for expected CSI calls: %s", err) } @@ -195,7 +195,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { return true, nil } return false, nil - }, ctx.Done()) + }) framework.ExpectNoError(err, "while waiting for all CSI calls") // The capacity error is dealt with in two different ways. @@ -325,7 +325,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { test := t ginkgo.It(t.name, func(ctx context.Context) { scName := "mock-csi-storage-capacity-" + f.UniqueName - m.init(testParameters{ + m.init(ctx, testParameters{ registerDriver: true, scName: scName, storageCapacity: test.storageCapacity, @@ -356,7 +356,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { syncDelay := 5 * time.Second time.Sleep(syncDelay) - sc, _, pod := m.createPod(pvcReference) // late binding as specified above + sc, _, pod := m.createPod(ctx, pvcReference) // late binding as specified above framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used") waitCtx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart) @@ -385,7 +385,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { } ginkgo.By("Deleting the previously created pod") - err = e2epod.DeletePodWithWait(m.cs, pod) + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) framework.ExpectNoError(err, "while deleting") }) } diff --git a/test/e2e/storage/csi_mock/csi_volume_expansion.go b/test/e2e/storage/csi_mock/csi_volume_expansion.go index a33f79232e4..2cdf43d4395 100644 --- a/test/e2e/storage/csi_mock/csi_volume_expansion.go +++ b/test/e2e/storage/csi_mock/csi_volume_expansion.go @@ -114,22 +114,22 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { tp.registerDriver = true } - m.init(tp) + m.init(ctx, tp) ginkgo.DeferCleanup(m.cleanup) - sc, pvc, pod := m.createPod(pvcReference) + sc, pvc, pod := m.createPod(ctx, pvcReference) gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") if !*sc.AllowVolumeExpansion { framework.Fail("failed creating sc with allowed expansion") } - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) ginkgo.By("Expanding current pvc") 
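[Editor's note — illustrative sketch, not part of the patch: the polling conversion used in the storage-capacity hunk above. wait.PollImmediateUntilWithContext takes the Ginkgo ctx directly, so the explicit ctx.Done() stop channel disappears and the condition function itself receives a context for its API calls. getCalls stands in for m.driver.GetCalls and is hypothetical.]

err := wait.PollImmediateUntilWithContext(ctx, time.Second, func(ctx context.Context) (bool, error) {
	calls, err := getCalls(ctx)
	if err != nil {
		// Returning done=true with a non-nil error stops polling immediately.
		return true, err
	}
	// Keep polling until at least one CSI call has been observed.
	return len(calls) > 0, nil
})
framework.ExpectNoError(err, "while waiting for CSI calls")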
newSize := resource.MustParse("6Gi") - newPVC, err := testsuites.ExpandPVCSize(pvc, newSize, m.cs) + newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, m.cs) framework.ExpectNoError(err, "While updating pvc for more size") pvc = newPVC gomega.Expect(pvc).NotTo(gomega.BeNil()) @@ -139,18 +139,18 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { framework.Failf("error updating pvc size %q", pvc.Name) } if test.expectFailure { - err = testsuites.WaitForResizingCondition(pvc, m.cs, csiResizingConditionWait) + err = testsuites.WaitForResizingCondition(ctx, pvc, m.cs, csiResizingConditionWait) framework.ExpectError(err, "unexpected resizing condition on PVC") return } ginkgo.By("Waiting for persistent volume resize to finish") - err = testsuites.WaitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod) + err = testsuites.WaitForControllerVolumeResize(ctx, pvc, m.cs, csiResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for CSI PV resize to finish") checkPVCSize := func() { ginkgo.By("Waiting for PVC resize to finish") - pvc, err = testsuites.WaitForFSResize(pvc, m.cs) + pvc, err = testsuites.WaitForFSResize(ctx, pvc, m.cs) framework.ExpectNoError(err, "while waiting for PVC resize to finish") pvcConditions := pvc.Status.Conditions @@ -162,7 +162,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { checkPVCSize() } else { ginkgo.By("Checking for conditions on pvc") - npvc, err := testsuites.WaitForPendingFSResizeCondition(pvc, m.cs) + npvc, err := testsuites.WaitForPendingFSResizeCondition(ctx, pvc, m.cs) framework.ExpectNoError(err, "While waiting for pvc to have fs resizing condition") pvc = npvc @@ -172,7 +172,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { } ginkgo.By("Deleting the previously created pod") - err = e2epod.DeletePodWithWait(m.cs, pod) + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) framework.ExpectNoError(err, "while deleting pod for resizing") ginkgo.By("Creating a new pod with same volume") @@ -208,22 +208,22 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { params.registerDriver = true } - m.init(params) + m.init(ctx, params) ginkgo.DeferCleanup(m.cleanup) - sc, pvc, pod := m.createPod(pvcReference) + sc, pvc, pod := m.createPod(ctx, pvcReference) gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") if !*sc.AllowVolumeExpansion { framework.Fail("failed creating sc with allowed expansion") } - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") - newPVC, err := testsuites.ExpandPVCSize(pvc, newSize, m.cs) + newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, m.cs) framework.ExpectNoError(err, "While updating pvc for more size") pvc = newPVC gomega.Expect(pvc).NotTo(gomega.BeNil()) @@ -234,11 +234,11 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { } ginkgo.By("Waiting for persistent volume resize to finish") - err = testsuites.WaitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod) + err = testsuites.WaitForControllerVolumeResize(ctx, pvc, m.cs, csiResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for PV resize to finish") ginkgo.By("Waiting for PVC resize to finish") - pvc, err = testsuites.WaitForFSResize(pvc, m.cs) + pvc, err = testsuites.WaitForFSResize(ctx, pvc, m.cs) 
framework.ExpectNoError(err, "while waiting for PVC to finish") pvcConditions := pvc.Status.Conditions @@ -285,22 +285,22 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { params.hooks = createExpansionHook(test.simulatedCSIDriverError) } - m.init(params) + m.init(ctx, params) ginkgo.DeferCleanup(m.cleanup) - sc, pvc, pod := m.createPod(pvcReference) + sc, pvc, pod := m.createPod(ctx, pvcReference) gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") if !*sc.AllowVolumeExpansion { framework.Fail("failed creating sc with allowed expansion") } - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) ginkgo.By("Expanding current pvc") newSize := resource.MustParse(test.pvcRequestSize) - newPVC, err := testsuites.ExpandPVCSize(pvc, newSize, m.cs) + newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, m.cs) framework.ExpectNoError(err, "While updating pvc for more size") pvc = newPVC gomega.Expect(pvc).NotTo(gomega.BeNil()) @@ -311,9 +311,9 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { } if test.simulatedCSIDriverError == expansionSuccess { - validateExpansionSuccess(pvc, m, test, test.allocatedResource) + validateExpansionSuccess(ctx, pvc, m, test, test.allocatedResource) } else { - validateRecoveryBehaviour(pvc, m, test) + validateRecoveryBehaviour(ctx, pvc, m, test) } }) } @@ -321,7 +321,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { }) }) -func validateRecoveryBehaviour(pvc *v1.PersistentVolumeClaim, m *mockDriverSetup, test recoveryTest) { +func validateRecoveryBehaviour(ctx context.Context, pvc *v1.PersistentVolumeClaim, m *mockDriverSetup, test recoveryTest) { var err error ginkgo.By("Waiting for resizer to set allocated resource") err = waitForAllocatedResource(pvc, m, test.allocatedResource) @@ -332,7 +332,7 @@ func validateRecoveryBehaviour(pvc *v1.PersistentVolumeClaim, m *mockDriverSetup framework.ExpectNoError(err, "While waiting for resize status to be set") ginkgo.By("Recover pvc size") - newPVC, err := testsuites.ExpandPVCSize(pvc, test.recoverySize, m.cs) + newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, test.recoverySize, m.cs) framework.ExpectNoError(err, "While updating pvc for more size") pvc = newPVC gomega.Expect(pvc).NotTo(gomega.BeNil()) @@ -344,7 +344,7 @@ func validateRecoveryBehaviour(pvc *v1.PersistentVolumeClaim, m *mockDriverSetup // if expansion failed on controller with final error, then recovery should be possible if test.simulatedCSIDriverError == expansionFailedOnController { - validateExpansionSuccess(pvc, m, test, test.recoverySize.String()) + validateExpansionSuccess(ctx, pvc, m, test, test.recoverySize.String()) return } @@ -369,14 +369,14 @@ func validateRecoveryBehaviour(pvc *v1.PersistentVolumeClaim, m *mockDriverSetup } } -func validateExpansionSuccess(pvc *v1.PersistentVolumeClaim, m *mockDriverSetup, test recoveryTest, expectedAllocatedSize string) { +func validateExpansionSuccess(ctx context.Context, pvc *v1.PersistentVolumeClaim, m *mockDriverSetup, test recoveryTest, expectedAllocatedSize string) { var err error ginkgo.By("Waiting for persistent volume resize to finish") - err = testsuites.WaitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod) + err = testsuites.WaitForControllerVolumeResize(ctx, pvc, m.cs, csiResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for PV resize 
to finish") ginkgo.By("Waiting for PVC resize to finish") - pvc, err = testsuites.WaitForFSResize(pvc, m.cs) + pvc, err = testsuites.WaitForFSResize(ctx, pvc, m.cs) framework.ExpectNoError(err, "while waiting for PVC to finish") pvcConditions := pvc.Status.Conditions diff --git a/test/e2e/storage/csi_mock/csi_volume_limit.go b/test/e2e/storage/csi_mock/csi_volume_limit.go index bc0714c04d5..c88db2e2985 100644 --- a/test/e2e/storage/csi_mock/csi_volume_limit.go +++ b/test/e2e/storage/csi_mock/csi_volume_limit.go @@ -43,7 +43,7 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { ginkgo.It("should report attach limit when limit is bigger than 0 [Slow]", func(ctx context.Context) { // define volume limit to be 2 for this test var err error - m.init(testParameters{attachLimit: 2}) + m.init(ctx, testParameters{attachLimit: 2}) ginkgo.DeferCleanup(m.cleanup) nodeName := m.config.ClientNodeSelection.Name @@ -54,19 +54,19 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2)) - _, _, pod1 := m.createPod(pvcReference) + _, _, pod1 := m.createPod(ctx, pvcReference) gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod") - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod1.Name, pod1.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) - _, _, pod2 := m.createPod(pvcReference) + _, _, pod2 := m.createPod(ctx, pvcReference) gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod") - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace) framework.ExpectNoError(err, "Failed to start pod2: %v", err) - _, _, pod3 := m.createPod(pvcReference) + _, _, pod3 := m.createPod(ctx, pvcReference) gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod") err = waitForMaxVolumeCondition(pod3, m.cs) framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3) @@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { ginkgo.It("should report attach limit for generic ephemeral volume when persistent volume is attached [Slow]", func(ctx context.Context) { // define volume limit to be 2 for this test var err error - m.init(testParameters{attachLimit: 1}) + m.init(ctx, testParameters{attachLimit: 1}) ginkgo.DeferCleanup(m.cleanup) nodeName := m.config.ClientNodeSelection.Name @@ -86,13 +86,13 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 1)) - _, _, pod1 := m.createPod(pvcReference) + _, _, pod1 := m.createPod(ctx, pvcReference) gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating pod with persistent volume") - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod1.Name, pod1.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) - _, _, pod2 := m.createPod(genericEphemeral) + _, _, pod2 := m.createPod(ctx, genericEphemeral) gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating pod with ephemeral volume") err = waitForMaxVolumeCondition(pod2, m.cs) framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod2) @@ -101,7 +101,7 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { ginkgo.It("should report 
attach limit for persistent volume when generic ephemeral volume is attached [Slow]", func(ctx context.Context) { // define volume limit to be 2 for this test var err error - m.init(testParameters{attachLimit: 1}) + m.init(ctx, testParameters{attachLimit: 1}) ginkgo.DeferCleanup(m.cleanup) nodeName := m.config.ClientNodeSelection.Name @@ -112,13 +112,13 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 1)) - _, _, pod1 := m.createPod(genericEphemeral) + _, _, pod1 := m.createPod(ctx, genericEphemeral) gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating pod with persistent volume") - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod1.Name, pod1.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) - _, _, pod2 := m.createPod(pvcReference) + _, _, pod2 := m.createPod(ctx, pvcReference) gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating pod with ephemeral volume") err = waitForMaxVolumeCondition(pod2, m.cs) framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod2) diff --git a/test/e2e/storage/csi_mock/csi_workload.go b/test/e2e/storage/csi_mock/csi_workload.go index 6d34229ccac..443b0d3b424 100644 --- a/test/e2e/storage/csi_mock/csi_workload.go +++ b/test/e2e/storage/csi_mock/csi_workload.go @@ -83,7 +83,7 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() { for _, t := range tests { test := t ginkgo.It(t.name, func(ctx context.Context) { - m.init(testParameters{ + m.init(ctx, testParameters{ registerDriver: test.deployClusterRegistrar, podInfo: test.podInfoOnMount}) @@ -93,11 +93,11 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() { if test.expectEphemeral { withVolume = csiEphemeral } - _, _, pod := m.createPod(withVolume) + _, _, pod := m.createPod(ctx, withVolume) if pod == nil { return } - err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod: %v", err) // If we expect an ephemeral volume, the feature has to be enabled. 
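[Editor's note — illustrative sketch, not part of the patch: the signature convention used for helpers converted in these files (compareCSICalls, checkPodLogs, validateExpansionSuccess, ...). A helper that talks to the API server gains ctx as its first parameter and passes it down instead of calling context.TODO(). waitForPodGone is a hypothetical example; clientset, metav1, apierrors and wait refer to the usual client-go and apimachinery packages.]

func waitForPodGone(ctx context.Context, cs clientset.Interface, name, namespace string) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
		_, err := cs.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			// Pod is gone: done.
			return true, nil
		}
		// Still present (err == nil) or a transient error: keep polling, or abort on a real error.
		return false, err
	})
}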
@@ -111,11 +111,11 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() { } ginkgo.By("Deleting the previously created pod") - err = e2epod.DeletePodWithWait(m.cs, pod) + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) framework.ExpectNoError(err, "while deleting") ginkgo.By("Checking CSI driver logs") - err = checkPodLogs(m.driver.GetCalls, pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled, false, 1) + err = checkPodLogs(ctx, m.driver.GetCalls, pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled, false, 1) framework.ExpectNoError(err) }) } diff --git a/test/e2e/storage/csistoragecapacity.go b/test/e2e/storage/csistoragecapacity.go index 44cb76d0176..2e352626d50 100644 --- a/test/e2e/storage/csistoragecapacity.go +++ b/test/e2e/storage/csistoragecapacity.go @@ -96,7 +96,7 @@ var _ = utils.SIGDescribe("CSIStorageCapacity", func() { ginkgo.By("getting /apis/storage.k8s.io") { group := &metav1.APIGroup{} - err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/storage.k8s.io").Do(context.TODO()).Into(group) + err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/storage.k8s.io").Do(ctx).Into(group) framework.ExpectNoError(err) found := false for _, version := range group.Versions { @@ -129,50 +129,50 @@ var _ = utils.SIGDescribe("CSIStorageCapacity", func() { // Main resource create/read/update/watch operations ginkgo.By("creating") - createdCSC, err := cscClient.Create(context.TODO(), csc, metav1.CreateOptions{}) + createdCSC, err := cscClient.Create(ctx, csc, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = cscClient.Create(context.TODO(), csc, metav1.CreateOptions{}) + _, err = cscClient.Create(ctx, csc, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { framework.Failf("expected 409, got %#v", err) } - _, err = cscClient.Create(context.TODO(), csc2, metav1.CreateOptions{}) + _, err = cscClient.Create(ctx, csc2, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("watching") framework.Logf("starting watch") - cscWatch, err := cscClient.Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + cscWatch, err := cscClient.Watch(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) - cscWatchNoNamespace, err := cscClientNoNamespace.Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + cscWatchNoNamespace, err := cscClientNoNamespace.Watch(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) // added for a watch - _, err = cscClient.Create(context.TODO(), csc3, metav1.CreateOptions{}) + _, err = cscClient.Create(ctx, csc3, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("getting") - gottenCSC, err := cscClient.Get(context.TODO(), csc.Name, metav1.GetOptions{}) + gottenCSC, err := cscClient.Get(ctx, csc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(gottenCSC.UID, createdCSC.UID) ginkgo.By("listing in namespace") - cscs, err := cscClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + cscs, err := cscClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(cscs.Items), 3, "filtered list should have 3 items, got: %s", cscs) ginkgo.By("listing across namespaces") - cscs, err = cscClientNoNamespace.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + cscs, err = 
cscClientNoNamespace.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) framework.ExpectEqual(len(cscs.Items), 3, "filtered list should have 3 items, got: %s", cscs) ginkgo.By("patching") - patchedCSC, err := cscClient.Patch(context.TODO(), createdCSC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) + patchedCSC, err := cscClient.Patch(ctx, createdCSC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(patchedCSC.Annotations["patched"], "true", "patched object should have the applied annotation") ginkgo.By("updating") csrToUpdate := patchedCSC.DeepCopy() csrToUpdate.Annotations["updated"] = "true" - updatedCSC, err := cscClient.Update(context.TODO(), csrToUpdate, metav1.UpdateOptions{}) + updatedCSC, err := cscClient.Update(ctx, csrToUpdate, metav1.UpdateOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(updatedCSC.Annotations["updated"], "true", "updated object should have the applied annotation") @@ -218,9 +218,9 @@ var _ = utils.SIGDescribe("CSIStorageCapacity", func() { // main resource delete operations ginkgo.By("deleting") - err = cscClient.Delete(context.TODO(), createdCSC.Name, metav1.DeleteOptions{}) + err = cscClient.Delete(ctx, createdCSC.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - csc, err = cscClient.Get(context.TODO(), createdCSC.Name, metav1.GetOptions{}) + csc, err = cscClient.Get(ctx, createdCSC.Name, metav1.GetOptions{}) min := 2 max := min switch { @@ -236,7 +236,7 @@ var _ = utils.SIGDescribe("CSIStorageCapacity", func() { default: framework.Failf("CSIStorageCapacitity should have been deleted or have DeletionTimestamp and Finalizers, but instead got: %s", csc) } - cscs, err = cscClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + cscs, err = cscClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) actualLen := len(cscs.Items) if actualLen < min || actualLen > max { @@ -244,9 +244,9 @@ var _ = utils.SIGDescribe("CSIStorageCapacity", func() { } ginkgo.By("deleting a collection") - err = cscClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + err = cscClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) - cscs, err = cscClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) + cscs, err = cscClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) framework.ExpectNoError(err) for _, csc := range cscs.Items { // Any remaining objects should be marked for deletion diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go index b007f660c79..a6d0bc7adc6 100644 --- a/test/e2e/storage/detach_mounted.go +++ b/test/e2e/storage/detach_mounted.go @@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Detaching volumes", func() { var node *v1.Node var suffix string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "local") e2eskipper.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") e2eskipper.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") @@ -64,7 +64,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Detaching 
volumes", func() { cs = f.ClientSet ns = f.Namespace var err error - node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) suffix = ns.Name }) @@ -76,9 +76,9 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Detaching volumes", func() { driverInstallAs := driver + "-" + suffix ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) - installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver)) + installFlex(ctx, cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver)) ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs)) - installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver)) + installFlex(ctx, cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver)) volumeSource := v1.VolumeSource{ FlexVolume: &v1.FlexVolumeSource{ Driver: "k8s/" + driverInstallAs, @@ -87,31 +87,31 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Detaching volumes", func() { clientPod := getFlexVolumePod(volumeSource, node.Name) ginkgo.By("Creating pod that uses slow format volume") - pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), clientPod, metav1.CreateOptions{}) + pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, clientPod, metav1.CreateOptions{}) framework.ExpectNoError(err) uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs) ginkgo.By("waiting for volumes to be attached to node") - err = waitForVolumesAttached(cs, node.Name, uniqueVolumeName) + err = waitForVolumesAttached(ctx, cs, node.Name, uniqueVolumeName) framework.ExpectNoError(err, "while waiting for volume to attach to %s node", node.Name) ginkgo.By("waiting for volume-in-use on the node after pod creation") - err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName) + err = waitForVolumesInUse(ctx, cs, node.Name, uniqueVolumeName) framework.ExpectNoError(err, "while waiting for volume in use") ginkgo.By("waiting for kubelet to start mounting the volume") time.Sleep(20 * time.Second) ginkgo.By("Deleting the flexvolume pod") - err = e2epod.DeletePodWithWait(cs, pod) + err = e2epod.DeletePodWithWait(ctx, cs, pod) framework.ExpectNoError(err, "in deleting the pod") // Wait a bit for node to sync the volume status time.Sleep(30 * time.Second) ginkgo.By("waiting for volume-in-use on the node after pod deletion") - err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName) + err = waitForVolumesInUse(ctx, cs, node.Name, uniqueVolumeName) framework.ExpectNoError(err, "while waiting for volume in use") // Wait for 110s because mount device operation has a sleep of 120 seconds @@ -119,13 +119,13 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Detaching volumes", func() { time.Sleep(durationForStuckMount) ginkgo.By("waiting for volume to disappear from node in-use") - err = waitForVolumesNotInUse(cs, node.Name, uniqueVolumeName) + err = waitForVolumesNotInUse(ctx, cs, node.Name, uniqueVolumeName) framework.ExpectNoError(err, "while waiting for volume to be removed from in-use") ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) - uninstallFlex(cs, node, "k8s", driverInstallAs) + uninstallFlex(ctx, cs, node, "k8s", driverInstallAs) ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs)) - uninstallFlex(cs, nil, "k8s", driverInstallAs) + uninstallFlex(ctx, cs, nil, "k8s", driverInstallAs) 
}) }) @@ -133,9 +133,9 @@ func getUniqueVolumeName(pod *v1.Pod, driverName string) string { return fmt.Sprintf("k8s/%s/%s", driverName, pod.Spec.Volumes[0].Name) } -func waitForVolumesNotInUse(client clientset.Interface, nodeName, volumeName string) error { - waitErr := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func waitForVolumesNotInUse(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error { + waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) { + node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) } @@ -153,9 +153,9 @@ func waitForVolumesNotInUse(client clientset.Interface, nodeName, volumeName str return nil } -func waitForVolumesAttached(client clientset.Interface, nodeName, volumeName string) error { - waitErr := wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func waitForVolumesAttached(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error { + waitErr := wait.PollImmediateWithContext(ctx, 2*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) { + node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) } @@ -173,9 +173,9 @@ func waitForVolumesAttached(client clientset.Interface, nodeName, volumeName str return nil } -func waitForVolumesInUse(client clientset.Interface, nodeName, volumeName string) error { - waitErr := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func waitForVolumesInUse(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error { + waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) { + node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) } diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index b134a34a65b..19661d56c03 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -184,7 +184,7 @@ func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPat } } -func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := config.GetUniqueDriverName() parameters := map[string]string{} ns := config.Framework.Namespace.Name @@ -200,25 +200,25 @@ func (h *hostpathCSIDriver) GetCSIDriverName(config *storageframework.PerTestCon return config.GetUniqueDriverName() } -func (h *hostpathCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { +func (h *hostpathCSIDriver) GetSnapshotClass(ctx context.Context, config *storageframework.PerTestConfig, parameters 
map[string]string) *unstructured.Unstructured { snapshotter := config.GetUniqueDriverName() ns := config.Framework.Namespace.Name return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns) } -func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (h *hostpathCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { // Create secondary namespace which will be used for creating driver - driverNamespace := utils.CreateDriverNamespace(f) + driverNamespace := utils.CreateDriverNamespace(ctx, f) driverns := driverNamespace.Name testns := f.Namespace.Name ginkgo.By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name)) - cancelLogging := utils.StartPodLogs(f, driverNamespace) + cancelLogging := utils.StartPodLogs(ctx, f, driverNamespace) cs := f.ClientSet // The hostpath CSI driver only works when everything runs on the same node. - node, err := e2enode.GetRandomReadySchedulableNode(cs) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs) framework.ExpectNoError(err) config := &storageframework.PerTestConfig{ Driver: h, @@ -246,7 +246,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) *storageframewor NodeName: node.Name, } - err = utils.CreateFromManifests(config.Framework, driverNamespace, func(item interface{}) error { + err = utils.CreateFromManifests(ctx, config.Framework, driverNamespace, func(item interface{}) error { if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil { return err } @@ -338,7 +338,7 @@ type MockCSITestDriver interface { // GetCalls returns all currently observed gRPC calls. Only valid // after PrepareTest. - GetCalls() ([]MockCSICall, error) + GetCalls(ctx context.Context) ([]MockCSICall, error) } // CSIMockDriverOpts defines options used for csi driver @@ -524,7 +524,7 @@ func (m *mockCSIDriver) GetDriverInfo() *storageframework.DriverInfo { func (m *mockCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { } -func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (m *mockCSIDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := config.GetUniqueDriverName() parameters := map[string]string{} ns := config.Framework.Namespace.Name @@ -532,18 +532,18 @@ func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *storageframework return storageframework.GetStorageClass(provisioner, parameters, nil, ns) } -func (m *mockCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { +func (m *mockCSIDriver) GetSnapshotClass(ctx context.Context, config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { snapshotter := m.driverInfo.Name + "-" + config.Framework.UniqueName ns := config.Framework.Namespace.Name return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns) } -func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (m *mockCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { m.clientSet = f.ClientSet // Create secondary namespace which will be used for creating driver - m.driverNamespace = utils.CreateDriverNamespace(f) + m.driverNamespace = utils.CreateDriverNamespace(ctx, f) driverns := m.driverNamespace.Name 
testns := f.Namespace.Name @@ -552,11 +552,11 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe } else { ginkgo.By("deploying csi mock driver") } - cancelLogging := utils.StartPodLogs(f, m.driverNamespace) + cancelLogging := utils.StartPodLogs(ctx, f, m.driverNamespace) cs := f.ClientSet // pods should be scheduled on the node - node, err := e2enode.GetRandomReadySchedulableNode(cs) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs) framework.ExpectNoError(err) embeddedCleanup := func() {} @@ -574,6 +574,9 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe // this process. podname := "csi-mockplugin-0" containername := "mock" + + // Must keep running even after the test context is cancelled + // for cleanup callbacks. ctx, cancel := context.WithCancel(context.Background()) serviceConfig := mockservice.Config{ DisableAttach: !m.attachable, @@ -596,7 +599,6 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe Node: s, } m.embeddedCSIDriver = mockdriver.NewCSIDriver(servers) - l, err := proxy.Listen(ctx, f.ClientSet, f.ClientConfig(), proxy.Addr{ Namespace: m.driverNamespace.Name, @@ -605,6 +607,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe Port: 9000, }, ) + framework.ExpectNoError(err, "start connecting to proxy pod") err = m.embeddedCSIDriver.Start(l, m.interceptGRPC) framework.ExpectNoError(err, "start mock driver") @@ -669,7 +672,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe if m.enableRecoverExpansionFailure { o.Features["csi-resizer"] = []string{"RecoverVolumeExpansionFailure=true"} } - err = utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error { + err = utils.CreateFromManifests(ctx, f, m.driverNamespace, func(item interface{}) error { if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil { return err } @@ -704,7 +707,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe ginkgo.DeferCleanup(func(ctx context.Context) { embeddedCleanup() - driverCleanupFunc() + driverCleanupFunc(ctx) }) return config @@ -731,7 +734,7 @@ func (m *mockCSIDriver) interceptGRPC(ctx context.Context, req interface{}, info return } -func (m *mockCSIDriver) GetCalls() ([]MockCSICall, error) { +func (m *mockCSIDriver) GetCalls(ctx context.Context) ([]MockCSICall, error) { if m.embedded { return m.calls.Get(), nil } @@ -746,7 +749,7 @@ func (m *mockCSIDriver) GetCalls() ([]MockCSICall, error) { driverContainerName := "mock" // Load logs of driver pod - log, err := e2epod.GetPodLogs(m.clientSet, m.driverNamespace.Name, driverPodName, driverContainerName) + log, err := e2epod.GetPodLogs(ctx, m.clientSet, m.driverNamespace.Name, driverPodName, driverContainerName) if err != nil { return nil, fmt.Errorf("could not load CSI driver logs: %s", err) } @@ -854,7 +857,7 @@ func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPatter } } -func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { ns := config.Framework.Namespace.Name provisioner := g.driverInfo.Name @@ -867,14 +870,14 @@ func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *storageframewor return 
storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns) } -func (g *gcePDCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { +func (g *gcePDCSIDriver) GetSnapshotClass(ctx context.Context, config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { snapshotter := g.driverInfo.Name ns := config.Framework.Namespace.Name return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns) } -func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (g *gcePDCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { testns := f.Namespace.Name cfg := &storageframework.PerTestConfig{ Driver: g, @@ -889,10 +892,10 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.P ginkgo.By("deploying csi gce-pd driver") // Create secondary namespace which will be used for creating driver - driverNamespace := utils.CreateDriverNamespace(f) + driverNamespace := utils.CreateDriverNamespace(ctx, f) driverns := driverNamespace.Name - cancelLogging := utils.StartPodLogs(f, driverNamespace) + cancelLogging := utils.StartPodLogs(ctx, f, driverNamespace) // It would be safer to rename the gcePD driver, but that // hasn't been done before either and attempts to do so now led to // errors during driver registration, therefore it is disabled @@ -915,12 +918,12 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.P "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", } - err := utils.CreateFromManifests(f, driverNamespace, nil, manifests...) + err := utils.CreateFromManifests(ctx, f, driverNamespace, nil, manifests...) if err != nil { framework.Failf("deploying csi gce-pd driver: %v", err) } - if err = WaitForCSIDriverRegistrationOnAllNodes(GCEPDCSIDriverName, f.ClientSet); err != nil { + if err = WaitForCSIDriverRegistrationOnAllNodes(ctx, GCEPDCSIDriverName, f.ClientSet); err != nil { framework.Failf("waiting for csi driver node registration on: %v", err) } @@ -942,13 +945,13 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.P // WaitForCSIDriverRegistrationOnAllNodes waits for the CSINode object to be updated // with the given driver on all schedulable nodes. 
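The registration wait in the next hunk keeps wait.ExponentialBackoff, which by itself knows nothing about cancellation; what makes it abort promptly is that the CSINodes().Get inside the condition now takes the test context, so once that context is done the call fails and the backoff loop returns the error. A minimal sketch of that pattern (helper name and backoff values are illustrative, not taken from the patch):

    package sketch

    import (
        "context"
        "time"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        clientset "k8s.io/client-go/kubernetes"
    )

    // waitForDriverOnNode mirrors the shape of WaitForCSIDriverRegistrationOnNode:
    // the backoff helper is not context-aware, but every API call inside the
    // condition is, so an aborted test surfaces as an error that stops the loop.
    func waitForDriverOnNode(ctx context.Context, cs clientset.Interface, nodeName, driverName string) error {
        backoff := wait.Backoff{Duration: 2 * time.Second, Factor: 1.5, Steps: 12} // illustrative values
        return wait.ExponentialBackoff(backoff, func() (bool, error) {
            csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
            if apierrors.IsNotFound(err) {
                return false, nil // CSINode not created yet, keep polling
            }
            if err != nil {
                return false, err // includes "context canceled" once the test is aborted
            }
            for _, driver := range csiNode.Spec.Drivers {
                if driver.Name == driverName {
                    return true, nil
                }
            }
            return false, nil
        })
    }

The same reasoning applies to the other polling helpers touched by this patch: the poll wrapper can stay as-is as long as the API call it wraps receives the test context.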
-func WaitForCSIDriverRegistrationOnAllNodes(driverName string, cs clientset.Interface) error { - nodes, err := e2enode.GetReadySchedulableNodes(cs) +func WaitForCSIDriverRegistrationOnAllNodes(ctx context.Context, driverName string, cs clientset.Interface) error { + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) if err != nil { return err } for _, node := range nodes.Items { - if err := WaitForCSIDriverRegistrationOnNode(node.Name, driverName, cs); err != nil { + if err := WaitForCSIDriverRegistrationOnNode(ctx, node.Name, driverName, cs); err != nil { return err } } @@ -956,7 +959,7 @@ func WaitForCSIDriverRegistrationOnAllNodes(driverName string, cs clientset.Inte } // WaitForCSIDriverRegistrationOnNode waits for the CSINode object generated by the node-registrar on a certain node -func WaitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs clientset.Interface) error { +func WaitForCSIDriverRegistrationOnNode(ctx context.Context, nodeName string, driverName string, cs clientset.Interface) error { framework.Logf("waiting for CSIDriver %v to register on node %v", driverName, nodeName) // About 8.6 minutes timeout @@ -967,7 +970,7 @@ func WaitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs c } waitErr := wait.ExponentialBackoff(backoff, func() (bool, error) { - csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1001,21 +1004,21 @@ func tryFunc(f func()) error { func generateDriverCleanupFunc( f *framework.Framework, driverName, testns, driverns string, - cancelLogging func()) func() { + cancelLogging func()) func(ctx context.Context) { // Cleanup CSI driver and namespaces. This function needs to be idempotent and can be // concurrently called from defer (or AfterEach) and AfterSuite action hooks. 
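generateDriverCleanupFunc now returns a func(ctx context.Context) rather than a plain func(), and its callers register it with ginkgo.DeferCleanup, which detects the context parameter and passes in a cleanup context of its own, one that stays usable after the spec's context has been cancelled. A condensed sketch of that wiring (helper name hypothetical, error handling reduced to best effort as in the original):

    package sketch

    import (
        "context"
        "fmt"

        "github.com/onsi/ginkgo/v2"

        "k8s.io/kubernetes/test/e2e/framework"
    )

    // registerDriverCleanup sketches the cleanup wiring used by the CSI drivers:
    // the closure declares a context.Context parameter, so DeferCleanup supplies
    // a context that is still valid while cleanup runs.
    func registerDriverCleanup(f *framework.Framework, driverName, testns, driverns string) {
        ginkgo.DeferCleanup(func(ctx context.Context) {
            ginkgo.By(fmt.Sprintf("uninstalling csi %s driver", driverName))
            // Best effort: the framework's own AfterEach may delete these
            // namespaces concurrently, so failures here are tolerated.
            f.DeleteNamespace(ctx, testns)
            f.DeleteNamespace(ctx, driverns)
        })
    }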
- cleanupFunc := func() { + cleanupFunc := func(ctx context.Context) { ginkgo.By(fmt.Sprintf("deleting the test namespace: %s", testns)) // Delete the primary namespace but it's okay to fail here because this namespace will // also be deleted by framework.Aftereach hook - tryFunc(func() { f.DeleteNamespace(testns) }) + _ = tryFunc(func() { f.DeleteNamespace(ctx, testns) }) ginkgo.By(fmt.Sprintf("uninstalling csi %s driver", driverName)) _ = tryFunc(cancelLogging) ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", driverns)) - tryFunc(func() { f.DeleteNamespace(driverns) }) + _ = tryFunc(func() { f.DeleteNamespace(ctx, driverns) }) } return cleanupFunc diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 5d24cfe836b..a44279fcffd 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -150,7 +150,7 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev }, nil } -func (n *nfsDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (n *nfsDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := n.externalPluginName parameters := map[string]string{"mountOptions": "vers=4.1"} ns := config.Framework.Namespace.Name @@ -158,25 +158,25 @@ func (n *nfsDriver) GetDynamicProvisionStorageClass(config *storageframework.Per return storageframework.GetStorageClass(provisioner, parameters, nil, ns) } -func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (n *nfsDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { cs := f.ClientSet ns := f.Namespace n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name) // TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner // is not enough. We should create new clusterrole for testing. 
- err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name, + err := e2eauth.BindClusterRole(ctx, cs.RbacV1(), "cluster-admin", ns.Name, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"}) framework.ExpectNoError(err) ginkgo.DeferCleanup(cs.RbacV1().ClusterRoleBindings().Delete, ns.Name+"--"+"cluster-admin", *metav1.NewDeleteOptions(0)) - err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(ctx, cs.AuthorizationV1(), serviceaccount.MakeUsername(ns.Name, "default"), "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization: %v", err) ginkgo.By("creating an external dynamic provisioner pod") - n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName) + n.externalProvisionerPod = utils.StartExternalProvisioner(ctx, cs, ns.Name, n.externalPluginName) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, n.externalProvisionerPod) return &storageframework.PerTestConfig{ @@ -186,7 +186,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes } } -func (n *nfsDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (n *nfsDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework cs := f.ClientSet ns := f.Namespace @@ -198,7 +198,7 @@ func (n *nfsDriver) CreateVolume(config *storageframework.PerTestConfig, volType case storageframework.InlineVolume: fallthrough case storageframework.PreprovisionedPV: - c, serverPod, serverHost := e2evolume.NewNFSServer(cs, ns.Name, []string{}) + c, serverPod, serverHost := e2evolume.NewNFSServer(ctx, cs, ns.Name, []string{}) config.ServerConfig = &c return &nfsVolume{ serverHost: serverHost, @@ -213,8 +213,8 @@ func (n *nfsDriver) CreateVolume(config *storageframework.PerTestConfig, volType return nil } -func (v *nfsVolume) DeleteVolume() { - cleanUpVolumeServer(v.f, v.serverPod) +func (v *nfsVolume) DeleteVolume(ctx context.Context) { + cleanUpVolumeServer(ctx, v.f, v.serverPod) } // iSCSI @@ -307,7 +307,7 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2 return &pvSource, nil } -func (i *iSCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (i *iSCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: i, Prefix: "iscsi", @@ -315,12 +315,12 @@ func (i *iSCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerT } } -func (i *iSCSIDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (i *iSCSIDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework cs := f.ClientSet ns := f.Namespace - c, serverPod, serverIP, iqn := newISCSIServer(cs, ns.Name) + c, serverPod, serverIP, iqn := newISCSIServer(ctx, cs, ns.Name) config.ServerConfig = &c config.ClientNodeSelection = c.ClientNodeSelection return &iSCSIVolume{ @@ -332,7 +332,7 @@ func (i *iSCSIDriver) CreateVolume(config *storageframework.PerTestConfig, volTy } // newISCSIServer is an iSCSI-specific wrapper for 
CreateStorageServer. -func newISCSIServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, ip, iqn string) { +func newISCSIServer(ctx context.Context, cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, ip, iqn string) { // Generate cluster-wide unique IQN iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace) config = e2evolume.TestConfig{ @@ -351,14 +351,14 @@ func newISCSIServer(cs clientset.Interface, namespace string) (config e2evolume. ServerReadyMessage: "iscsi target started", ServerHostNetwork: true, } - pod, ip = e2evolume.CreateStorageServer(cs, config) + pod, ip = e2evolume.CreateStorageServer(ctx, cs, config) // Make sure the client runs on the same node as server so we don't need to open any firewalls. config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName} return config, pod, ip, iqn } // newRBDServer is a CephRBD-specific wrapper for CreateStorageServer. -func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) { +func newRBDServer(ctx context.Context, cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) { config = e2evolume.TestConfig{ Namespace: namespace, Prefix: "rbd", @@ -369,7 +369,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.Te }, ServerReadyMessage: "Ceph is ready", } - pod, ip = e2evolume.CreateStorageServer(cs, config) + pod, ip = e2evolume.CreateStorageServer(ctx, cs, config) // create secrets for the server secret = &v1.Secret{ TypeMeta: metav1.TypeMeta{ @@ -386,7 +386,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.Te Type: "kubernetes.io/rbd", } - secret, err := cs.CoreV1().Secrets(config.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + secret, err := cs.CoreV1().Secrets(config.Namespace).Create(ctx, secret, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create secrets for Ceph RBD: %v", err) } @@ -394,8 +394,8 @@ func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.Te return config, pod, secret, ip } -func (v *iSCSIVolume) DeleteVolume() { - cleanUpVolumeServer(v.f, v.serverPod) +func (v *iSCSIVolume) DeleteVolume(ctx context.Context) { + cleanUpVolumeServer(ctx, v.f, v.serverPod) } // Ceph RBD @@ -501,7 +501,7 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev return &pvSource, nil } -func (r *rbdDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (r *rbdDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: r, Prefix: "rbd", @@ -509,12 +509,12 @@ func (r *rbdDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes } } -func (r *rbdDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (r *rbdDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework cs := f.ClientSet ns := f.Namespace - c, serverPod, secret, serverIP := newRBDServer(cs, ns.Name) + c, serverPod, secret, serverIP := newRBDServer(ctx, cs, ns.Name) config.ServerConfig = &c return &rbdVolume{ serverPod: serverPod, @@ -524,8 +524,8 @@ func (r *rbdDriver) CreateVolume(config 
*storageframework.PerTestConfig, volType } } -func (v *rbdVolume) DeleteVolume() { - cleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) +func (v *rbdVolume) DeleteVolume(ctx context.Context) { + cleanUpVolumeServerWithSecret(ctx, v.f, v.serverPod, v.secret) } // Ceph @@ -616,7 +616,7 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e }, nil } -func (c *cephFSDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (c *cephFSDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: c, Prefix: "cephfs", @@ -624,12 +624,12 @@ func (c *cephFSDriver) PrepareTest(f *framework.Framework) *storageframework.Per } } -func (c *cephFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (c *cephFSDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework cs := f.ClientSet ns := f.Namespace - cfg, serverPod, secret, serverIP := newRBDServer(cs, ns.Name) + cfg, serverPod, secret, serverIP := newRBDServer(ctx, cs, ns.Name) config.ServerConfig = &cfg return &cephVolume{ serverPod: serverPod, @@ -639,8 +639,8 @@ func (c *cephFSDriver) CreateVolume(config *storageframework.PerTestConfig, volT } } -func (v *cephVolume) DeleteVolume() { - cleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) +func (v *cephVolume) DeleteVolume(ctx context.Context) { + cleanUpVolumeServerWithSecret(ctx, v.f, v.serverPod, v.secret) } // Hostpath @@ -693,7 +693,7 @@ func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume } } -func (h *hostPathDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (h *hostPathDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: h, Prefix: "hostpath", @@ -701,12 +701,12 @@ func (h *hostPathDriver) PrepareTest(f *framework.Framework) *storageframework.P } } -func (h *hostPathDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (h *hostPathDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework cs := f.ClientSet // pods should be scheduled on the node - node, err := e2enode.GetRandomReadySchedulableNode(cs) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs) framework.ExpectNoError(err) config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name} return nil @@ -774,7 +774,7 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2 } } -func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (h *hostPathSymlinkDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: h, Prefix: "hostpathsymlink", @@ -782,7 +782,7 @@ func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) *storagefram } } -func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (h *hostPathSymlinkDriver) CreateVolume(ctx context.Context, config 
*storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework cs := f.ClientSet @@ -791,7 +791,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestCon volumeName := "test-volume" // pods should be scheduled on the node - node, err := e2enode.GetRandomReadySchedulableNode(cs) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs) framework.ExpectNoError(err) config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name} @@ -835,13 +835,13 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestCon }, } // h.prepPod will be reused in cleanupDriver. - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), prepPod, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, prepPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating hostPath init pod") - err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed") - err = e2epod.DeletePodWithWait(f.ClientSet, pod) + err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting hostPath init pod") return &hostPathSymlinkVolume{ sourcePath: sourcePath, @@ -851,19 +851,19 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestCon } } -func (v *hostPathSymlinkVolume) DeleteVolume() { +func (v *hostPathSymlinkVolume) DeleteVolume(ctx context.Context) { f := v.f cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", v.targetPath, v.sourcePath) v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd} - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), v.prepPod, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, v.prepPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating hostPath teardown pod") - err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed") - err = e2epod.DeletePodWithWait(f.ClientSet, pod) + err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting hostPath teardown pod") } @@ -911,11 +911,11 @@ func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume } } -func (e *emptydirDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (e *emptydirDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { return nil } -func (e *emptydirDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (e *emptydirDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: e, Prefix: "emptydir", @@ -968,7 +968,7 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) e2eskipper.SkipUnlessProviderIs("openstack") } -func (c *cinderDriver) 
GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (c *cinderDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/cinder" parameters := map[string]string{} if fsType != "" { @@ -979,7 +979,7 @@ func (c *cinderDriver) GetDynamicProvisionStorageClass(config *storageframework. return storageframework.GetStorageClass(provisioner, parameters, nil, ns) } -func (c *cinderDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (c *cinderDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: c, Prefix: "cinder", @@ -1120,7 +1120,7 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2 return &pvSource, nil } -func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (g *gcePdDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/gce-pd" parameters := map[string]string{} if fsType != "" { @@ -1132,7 +1132,7 @@ func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *storageframework.P return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns) } -func (g *gcePdDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (g *gcePdDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { config := &storageframework.PerTestConfig{ Driver: g, Prefix: "gcepd", @@ -1150,8 +1150,8 @@ func (g *gcePdDriver) PrepareTest(f *framework.Framework) *storageframework.PerT } -func (g *gcePdDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { - zone := getInlineVolumeZone(config.Framework) +func (g *gcePdDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { + zone := getInlineVolumeZone(ctx, config.Framework) if volType == storageframework.InlineVolume { // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. 
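Every TestVolume implementation in these files follows the same shape after the change: DeleteVolume receives the caller's context and hands it to the deletion helper, whose error is discarded with `_ =` because cleanup is best effort. A minimal sketch of such an implementation (type name hypothetical):

    package sketch

    import (
        "context"

        e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    )

    // pdVolume has the shape of the TestVolume implementations in this file
    // after the change: it only remembers the volume name, and DeleteVolume
    // receives the caller's context instead of starting from context.TODO().
    type pdVolume struct {
        volumeName string
    }

    func (v *pdVolume) DeleteVolume(ctx context.Context) {
        // Cleanup is best effort; the error is discarded, matching the call
        // sites above and below.
        _ = e2epv.DeletePDWithRetry(ctx, v.volumeName)
    }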
@@ -1162,15 +1162,15 @@ func (g *gcePdDriver) CreateVolume(config *storageframework.PerTestConfig, volTy } } ginkgo.By("creating a test gce pd volume") - vname, err := e2epv.CreatePDWithRetryAndZone(zone) + vname, err := e2epv.CreatePDWithRetryAndZone(ctx, zone) framework.ExpectNoError(err) return &gcePdVolume{ volumeName: vname, } } -func (v *gcePdVolume) DeleteVolume() { - e2epv.DeletePDWithRetry(v.volumeName) +func (v *gcePdVolume) DeleteVolume(ctx context.Context) { + _ = e2epv.DeletePDWithRetry(ctx, v.volumeName) } // vSphere @@ -1269,7 +1269,7 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, return &pvSource, nil } -func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (v *vSphereDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/vsphere-volume" parameters := map[string]string{} if fsType != "" { @@ -1280,14 +1280,14 @@ func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *storageframework return storageframework.GetStorageClass(provisioner, parameters, nil, ns) } -func (v *vSphereDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (v *vSphereDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { ginkgo.DeferCleanup(func(ctx context.Context) { // Driver Cleanup function // Logout each vSphere client connection to prevent session leakage - nodes := vspheretest.GetReadySchedulableNodeInfos() + nodes := vspheretest.GetReadySchedulableNodeInfos(ctx) for _, node := range nodes { if node.VSphere.Client != nil { - node.VSphere.Client.Logout(context.TODO()) + _ = node.VSphere.Client.Logout(ctx) } } }) @@ -1298,10 +1298,10 @@ func (v *vSphereDriver) PrepareTest(f *framework.Framework) *storageframework.Pe } } -func (v *vSphereDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (v *vSphereDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework vspheretest.Bootstrap(f) - nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo() + nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo(ctx) volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) return &vSphereVolume{ @@ -1310,7 +1310,7 @@ func (v *vSphereDriver) CreateVolume(config *storageframework.PerTestConfig, vol } } -func (v *vSphereVolume) DeleteVolume() { +func (v *vSphereVolume) DeleteVolume(ctx context.Context) { v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef) } @@ -1415,7 +1415,7 @@ func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string return &pvSource, nil } -func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (a *azureDiskDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/azure-disk" parameters := map[string]string{} if fsType != "" { @@ -1427,7 +1427,7 @@ func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *storageframewo return 
storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns) } -func (a *azureDiskDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (a *azureDiskDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: a, Prefix: "azure", @@ -1435,9 +1435,9 @@ func (a *azureDiskDriver) PrepareTest(f *framework.Framework) *storageframework. } } -func (a *azureDiskDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (a *azureDiskDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { ginkgo.By("creating a test azure disk volume") - zone := getInlineVolumeZone(config.Framework) + zone := getInlineVolumeZone(ctx, config.Framework) if volType == storageframework.InlineVolume { // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. @@ -1447,15 +1447,15 @@ func (a *azureDiskDriver) CreateVolume(config *storageframework.PerTestConfig, v }, } } - volumeName, err := e2epv.CreatePDWithRetryAndZone(zone) + volumeName, err := e2epv.CreatePDWithRetryAndZone(ctx, zone) framework.ExpectNoError(err) return &azureDiskVolume{ volumeName: volumeName, } } -func (v *azureDiskVolume) DeleteVolume() { - e2epv.DeletePDWithRetry(v.volumeName) +func (v *azureDiskVolume) DeleteVolume(ctx context.Context) { + _ = e2epv.DeletePDWithRetry(ctx, v.volumeName) } // AWS @@ -1554,7 +1554,7 @@ func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev return &pvSource, nil } -func (a *awsDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (a *awsDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/aws-ebs" parameters := map[string]string{} if fsType != "" { @@ -1566,7 +1566,7 @@ func (a *awsDriver) GetDynamicProvisionStorageClass(config *storageframework.Per return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns) } -func (a *awsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (a *awsDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { config := &storageframework.PerTestConfig{ Driver: a, Prefix: "aws", @@ -1583,8 +1583,8 @@ func (a *awsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes return config } -func (a *awsDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { - zone := getInlineVolumeZone(config.Framework) +func (a *awsDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { + zone := getInlineVolumeZone(ctx, config.Framework) if volType == storageframework.InlineVolume || volType == storageframework.PreprovisionedPV { // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. 
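CreatePDWithRetryAndZone and the other provisioning helpers take the context for the same reason: a retry loop that never checks ctx keeps creating cloud disks after the spec has been interrupted. As a generic illustration (not the framework's actual implementation), a context-aware retry looks like this:

    package sketch

    import (
        "context"
        "fmt"
        "time"
    )

    // retryWithContext calls attempt until it succeeds, the retry budget is
    // exhausted, or ctx is cancelled; it illustrates why the provisioning
    // helpers in this patch now take a context.
    func retryWithContext(ctx context.Context, attempts int, delay time.Duration, attempt func(context.Context) error) error {
        var lastErr error
        for i := 0; i < attempts; i++ {
            if err := ctx.Err(); err != nil {
                return fmt.Errorf("giving up after %d attempts: %w", i, err)
            }
            if lastErr = attempt(ctx); lastErr == nil {
                return nil
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(delay):
            }
        }
        return lastErr
    }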
@@ -1595,15 +1595,15 @@ func (a *awsDriver) CreateVolume(config *storageframework.PerTestConfig, volType } } ginkgo.By("creating a test aws volume") - vname, err := e2epv.CreatePDWithRetryAndZone(zone) + vname, err := e2epv.CreatePDWithRetryAndZone(ctx, zone) framework.ExpectNoError(err) return &awsVolume{ volumeName: vname, } } -func (v *awsVolume) DeleteVolume() { - e2epv.DeletePDWithRetry(v.volumeName) +func (v *awsVolume) DeleteVolume(ctx context.Context) { + _ = e2epv.DeletePDWithRetry(ctx, v.volumeName) } // local @@ -1704,9 +1704,9 @@ func (l *localDriver) GetDriverInfo() *storageframework.DriverInfo { func (l *localDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { } -func (l *localDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (l *localDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { var err error - l.node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet) + l.node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) l.hostExec = utils.NewHostExec(f) @@ -1717,7 +1717,7 @@ func (l *localDriver) PrepareTest(f *framework.Framework) *storageframework.PerT ssdInterface := "scsi" filesystemType := "fs" ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType) - res, err := l.hostExec.IssueCommandWithResult(ssdCmd, l.node) + res, err := l.hostExec.IssueCommandWithResult(ctx, ssdCmd, l.node) framework.ExpectNoError(err) num, err := strconv.Atoi(strings.TrimSpace(res)) framework.ExpectNoError(err) @@ -1735,7 +1735,7 @@ func (l *localDriver) PrepareTest(f *framework.Framework) *storageframework.PerT } } -func (l *localDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (l *localDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { switch volType { case storageframework.PreprovisionedPV: node := l.node @@ -1743,7 +1743,7 @@ func (l *localDriver) CreateVolume(config *storageframework.PerTestConfig, volTy config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name} return &localVolume{ ltrMgr: l.ltrMgr, - ltr: l.ltrMgr.Create(node, l.volumeType, nil), + ltr: l.ltrMgr.Create(ctx, node, l.volumeType, nil), } default: framework.Failf("Unsupported volType: %v is specified", volType) @@ -1751,8 +1751,8 @@ func (l *localDriver) CreateVolume(config *storageframework.PerTestConfig, volTy return nil } -func (v *localVolume) DeleteVolume() { - v.ltrMgr.Remove(v.ltr) +func (v *localVolume) DeleteVolume(ctx context.Context) { + v.ltrMgr.Remove(ctx, v.ltr) } func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity { @@ -1795,16 +1795,16 @@ func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2 } // cleanUpVolumeServer is a wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function. 
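Several cleanups in this patch are registered as ginkgo.DeferCleanup(fn, args...) where fn now starts with a context parameter, for example DeferCleanup(e2epod.DeletePodWithWait, cs, pod); Ginkgo notices the leading context parameter and supplies a cleanup context itself, so those call sites do not need to change. A short sketch of the two equivalent registration styles (in real code you would pick one):

    package sketch

    import (
        "context"

        "github.com/onsi/ginkgo/v2"

        v1 "k8s.io/api/core/v1"
        clientset "k8s.io/client-go/kubernetes"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    // deferServerPodCleanup registers deletion of a server pod twice, once in
    // each style, purely for illustration. In the first form Ginkgo injects a
    // cleanup context for DeletePodWithWait; the second spells it out.
    func deferServerPodCleanup(cs clientset.Interface, serverPod *v1.Pod) {
        ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, serverPod)

        ginkgo.DeferCleanup(func(ctx context.Context) {
            _ = e2epod.DeletePodWithWait(ctx, cs, serverPod)
        })
    }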
-func cleanUpVolumeServer(f *framework.Framework, serverPod *v1.Pod) { - cleanUpVolumeServerWithSecret(f, serverPod, nil) +func cleanUpVolumeServer(ctx context.Context, f *framework.Framework, serverPod *v1.Pod) { + cleanUpVolumeServerWithSecret(ctx, f, serverPod, nil) } -func getInlineVolumeZone(f *framework.Framework) string { +func getInlineVolumeZone(ctx context.Context, f *framework.Framework) string { if framework.TestContext.CloudConfig.Zone != "" { return framework.TestContext.CloudConfig.Zone } // if zone is not specified we will randomly pick a zone from schedulable nodes for inline tests - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) zone, ok := node.Labels[v1.LabelFailureDomainBetaZone] if ok { @@ -1814,20 +1814,20 @@ func getInlineVolumeZone(f *framework.Framework) string { } // cleanUpVolumeServerWithSecret is a wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function. -func cleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, secret *v1.Secret) { +func cleanUpVolumeServerWithSecret(ctx context.Context, f *framework.Framework, serverPod *v1.Pod, secret *v1.Secret) { cs := f.ClientSet ns := f.Namespace if secret != nil { framework.Logf("Deleting server secret %q...", secret.Name) - err := cs.CoreV1().Secrets(ns.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) + err := cs.CoreV1().Secrets(ns.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{}) if err != nil { framework.Logf("Delete secret failed: %v", err) } } framework.Logf("Deleting server pod %q...", serverPod.Name) - err := e2epod.DeletePodWithWait(cs, serverPod) + err := e2epod.DeletePodWithWait(ctx, cs, serverPod) if err != nil { framework.Logf("Server pod delete failed: %v", err) } @@ -1916,7 +1916,7 @@ func (a *azureFileDriver) GetPersistentVolumeSource(readOnly bool, fsType string return &pvSource, nil } -func (a *azureFileDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (a *azureFileDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/azure-file" parameters := map[string]string{} ns := config.Framework.Namespace.Name @@ -1924,7 +1924,7 @@ func (a *azureFileDriver) GetDynamicProvisionStorageClass(config *storageframewo return storageframework.GetStorageClass(provisioner, parameters, &immediateBinding, ns) } -func (a *azureFileDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (a *azureFileDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: a, Prefix: "azure-file", @@ -1932,7 +1932,7 @@ func (a *azureFileDriver) PrepareTest(f *framework.Framework) *storageframework. 
} } -func (a *azureFileDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { +func (a *azureFileDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { ginkgo.By("creating a test azure file volume") accountName, accountKey, shareName, err := e2epv.CreateShare() framework.ExpectNoError(err) @@ -1951,7 +1951,7 @@ func (a *azureFileDriver) CreateVolume(config *storageframework.PerTestConfig, v Type: "Opaque", } - _, err = config.Framework.ClientSet.CoreV1().Secrets(config.Framework.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = config.Framework.ClientSet.CoreV1().Secrets(config.Framework.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err) return &azureFileVolume{ accountName: accountName, @@ -1961,7 +1961,7 @@ func (a *azureFileDriver) CreateVolume(config *storageframework.PerTestConfig, v } } -func (v *azureFileVolume) DeleteVolume() { +func (v *azureFileVolume) DeleteVolume(ctx context.Context) { err := e2epv.DeleteShare(v.accountName, v.shareName) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index cd91ae37f9a..7ff396c6970 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { } var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { }, } - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -146,18 +146,18 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { }, }, } - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By("Cleaning up the secret") - if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); err != nil { + if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to delete secret %v: %v", secret.Name, err) } ginkgo.By("Cleaning up the configmap") - if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}); err != nil { + if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMap.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to delete configmap %v: %v", configMap.Name, err) } ginkgo.By("Cleaning up the pod") - if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)); err != nil { + if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, 
*metav1.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete pod %v: %v", pod.Name, err) } }) @@ -186,11 +186,11 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { Description: Create 50 ConfigMaps Volumes and 5 replicas of pod with these ConfigMapvolumes mounted. Pod MUST NOT fail waiting for Volumes. */ framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func(ctx context.Context) { - configMapNames := createConfigmapsForRace(f) + configMapNames := createConfigmapsForRace(ctx, f) ginkgo.DeferCleanup(deleteConfigMaps, f, configMapNames) volumes, volumeMounts := makeConfigMapVolumes(configMapNames) for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ { - testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount) + testNoWrappedVolumeRace(ctx, f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount) } }) @@ -199,16 +199,16 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { // To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. // This projected volume maps approach can also be tested with secrets and downwardapi VolumeSource but are less prone to the race problem. ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func(ctx context.Context) { - gitURL, gitRepo, cleanup := createGitServer(f) + gitURL, gitRepo, cleanup := createGitServer(ctx, f) defer cleanup() volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo) for i := 0; i < wrappedVolumeRaceGitRepoIterationCount; i++ { - testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount) + testNoWrappedVolumeRace(ctx, f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount) } }) }) -func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) { +func createGitServer(ctx context.Context, f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) { var err error gitServerPodName := "git-server-" + string(uuid.NewUUID()) containerPort := 8000 @@ -217,7 +217,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle gitServerPod := e2epod.NewAgnhostPod(f.Namespace.Name, gitServerPodName, nil, nil, []v1.ContainerPort{{ContainerPort: int32(containerPort)}}, "fake-gitserver") gitServerPod.ObjectMeta.Labels = labels - e2epod.NewPodClient(f).CreateSync(gitServerPod) + e2epod.NewPodClient(f).CreateSync(ctx, gitServerPod) // Portal IP and port httpPort := 2345 @@ -238,17 +238,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle }, } - if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), gitServerSvc, metav1.CreateOptions{}); err != nil { + if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, gitServerSvc, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) } return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { ginkgo.By("Cleaning up the git server pod") - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), gitServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, gitServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete git server 
pod %v: %v", gitServerPod.Name, err) } ginkgo.By("Cleaning up the git server svc") - if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), gitServerSvc.Name, metav1.DeleteOptions{}); err != nil { + if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, gitServerSvc.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) } } @@ -274,7 +274,7 @@ func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMoun return } -func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { +func createConfigmapsForRace(ctx context.Context, f *framework.Framework) (configMapNames []string) { ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount)) for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ { configMapName := fmt.Sprintf("racey-configmap-%d", i) @@ -288,16 +288,16 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { "data-1": "value-1", }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}) framework.ExpectNoError(err) } return } -func deleteConfigMaps(f *framework.Framework, configMapNames []string) { +func deleteConfigMaps(ctx context.Context, f *framework.Framework, configMapNames []string) { ginkgo.By("Cleaning up the configMaps") for _, configMapName := range configMapNames { - err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMapName, metav1.DeleteOptions{}) + err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMapName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "unable to delete configMap %v", configMapName) } } @@ -329,11 +329,11 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeM return } -func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) { +func testNoWrappedVolumeRace(ctx context.Context, f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) { const nodeHostnameLabelKey = "kubernetes.io/hostname" rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID()) - targetNode, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + targetNode, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) ginkgo.By("Creating RC which spawns configmap-volume pods") @@ -383,12 +383,12 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume }, }, } - _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rc, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rc, metav1.CreateOptions{}) framework.ExpectNoError(err, "error creating replication controller") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, rcName) - pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount) + pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rcName, podCount) framework.ExpectNoError(err, "error creating pods") ginkgo.By("Ensuring each pod is running") @@ -399,7 +399,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume if pod.DeletionTimestamp != nil { continue 
} - err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed waiting for pod %s to enter running state", pod.Name) } } diff --git a/test/e2e/storage/ephemeral_volume.go b/test/e2e/storage/ephemeral_volume.go index 8cadb232630..72e5bf92e66 100644 --- a/test/e2e/storage/ephemeral_volume.go +++ b/test/e2e/storage/ephemeral_volume.go @@ -57,13 +57,13 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() { testSource := testSource ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func(ctx context.Context) { pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source) - pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) // Allow it to sleep for 30 seconds time.Sleep(30 * time.Second) framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) }) } }) diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go index 1ae31c63ce7..e6408a97f0d 100644 --- a/test/e2e/storage/external/external.go +++ b/test/e2e/storage/external/external.go @@ -270,7 +270,7 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern storageframework.TestPatt } -func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { +func (d *driverDefinition) GetDynamicProvisionStorageClass(ctx context.Context, e2econfig *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { var ( sc *storagev1.StorageClass err error @@ -281,7 +281,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storagefra case d.StorageClass.FromName: sc = &storagev1.StorageClass{Provisioner: d.DriverInfo.Name} case d.StorageClass.FromExistingClassName != "": - sc, err = f.ClientSet.StorageV1().StorageClasses().Get(context.TODO(), d.StorageClass.FromExistingClassName, metav1.GetOptions{}) + sc, err = f.ClientSet.StorageV1().StorageClasses().Get(ctx, d.StorageClass.FromExistingClassName, metav1.GetOptions{}) framework.ExpectNoError(err, "getting storage class %s", d.StorageClass.FromExistingClassName) case d.StorageClass.FromFile != "": var ok bool @@ -360,7 +360,7 @@ func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) { return snapshotClass, nil } -func (d *driverDefinition) GetSnapshotClass(e2econfig *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { +func (d *driverDefinition) GetSnapshotClass(ctx context.Context, e2econfig *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" { e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name) } @@ -373,7 +373,7 @@ func (d *driverDefinition) GetSnapshotClass(e2econfig *storageframework.PerTestC case d.SnapshotClass.FromName: // Do nothing (just use empty parameters) case d.SnapshotClass.FromExistingClassName != "": - snapshotClass, err := f.DynamicClient.Resource(utils.SnapshotClassGVR).Get(context.TODO(), 
d.SnapshotClass.FromExistingClassName, metav1.GetOptions{}) + snapshotClass, err := f.DynamicClient.Resource(utils.SnapshotClassGVR).Get(ctx, d.SnapshotClass.FromExistingClassName, metav1.GetOptions{}) framework.ExpectNoError(err, "getting snapshot class %s", d.SnapshotClass.FromExistingClassName) if params, ok := snapshotClass.Object["parameters"].(map[string]interface{}); ok { @@ -415,7 +415,7 @@ func (d *driverDefinition) GetCSIDriverName(e2econfig *storageframework.PerTestC return d.DriverInfo.Name } -func (d *driverDefinition) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { +func (d *driverDefinition) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { e2econfig := &storageframework.PerTestConfig{ Driver: d, Prefix: "external", diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index b222f9d877a..0065c406eb3 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -52,7 +52,7 @@ const ( // testFlexVolume tests that a client pod using a given flexvolume driver // successfully mounts it and runs -func testFlexVolume(driver string, config e2evolume.TestConfig, f *framework.Framework) { +func testFlexVolume(ctx context.Context, driver string, config e2evolume.TestConfig, f *framework.Framework) { tests := []e2evolume.Test{ { Volume: v1.VolumeSource{ @@ -65,13 +65,13 @@ func testFlexVolume(driver string, config e2evolume.TestConfig, f *framework.Fra ExpectedContent: "Hello from flexvolume!", }, } - e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests) } // installFlex installs the driver found at filePath on the node, and restarts // kubelet if 'restart' is true. If node is nil, installs on the master, and restarts // controller-manager if 'restart' is true. 
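The external driver's GetSnapshotClass above fetches an existing VolumeSnapshotClass through the dynamic client with the test context. A sketch of that lookup follows; the GroupVersionResource literal is an assumption about what utils.SnapshotClassGVR resolves to, the real code uses that constant:

    package sketch

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/dynamic"
    )

    // getExistingSnapshotClass sketches the dynamic-client lookup used by
    // GetSnapshotClass when FromExistingClassName is set.
    func getExistingSnapshotClass(ctx context.Context, dc dynamic.Interface, name string) (*unstructured.Unstructured, error) {
        // Assumed GVR; the driver code uses utils.SnapshotClassGVR instead.
        gvr := schema.GroupVersionResource{
            Group:    "snapshot.storage.k8s.io",
            Version:  "v1",
            Resource: "volumesnapshotclasses",
        }
        // VolumeSnapshotClass is cluster-scoped, so no namespace is involved.
        return dc.Resource(gvr).Get(ctx, name, metav1.GetOptions{})
    }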
-func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath string) {
+func installFlex(ctx context.Context, c clientset.Interface, node *v1.Node, vendor, driver, filePath string) {
 	flexDir := getFlexDir(c, node, vendor, driver)
 	flexFile := path.Join(flexDir, driver)
@@ -91,20 +91,20 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath
 	framework.ExpectNoError(err)
 	cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir)
-	sshAndLog(cmd, host, true /*failOnError*/)
+	sshAndLog(ctx, cmd, host, true /*failOnError*/)
 	data, err := e2etestfiles.Read(filePath)
 	if err != nil {
 		framework.Fail(err.Error())
 	}
 	cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data))
-	sshAndLog(cmd, host, true /*failOnError*/)
+	sshAndLog(ctx, cmd, host, true /*failOnError*/)
 	cmd = fmt.Sprintf("sudo chmod +x %s", flexFile)
-	sshAndLog(cmd, host, true /*failOnError*/)
+	sshAndLog(ctx, cmd, host, true /*failOnError*/)
 }
-func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string) {
+func uninstallFlex(ctx context.Context, c clientset.Interface, node *v1.Node, vendor, driver string) {
 	flexDir := getFlexDir(c, node, vendor, driver)
 	host := ""
@@ -125,7 +125,7 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string)
 	}
 	cmd := fmt.Sprintf("sudo rm -r %s", flexDir)
-	sshAndLog(cmd, host, false /*failOnError*/)
+	sshAndLog(ctx, cmd, host, false /*failOnError*/)
 }
 func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) string {
@@ -137,8 +137,8 @@ func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) str
 	return flexDir
 }
-func sshAndLog(cmd, host string, failOnError bool) {
-	result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
+func sshAndLog(ctx context.Context, cmd, host string, failOnError bool) {
+	result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider)
 	e2essh.LogResult(result)
 	framework.ExpectNoError(err)
 	if result.Code != 0 && failOnError {
@@ -169,7 +169,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 	var config e2evolume.TestConfig
 	var suffix string
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gce", "local")
 		e2eskipper.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
 		e2eskipper.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
@@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		cs = f.ClientSet
 		ns = f.Namespace
 		var err error
-		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
 		config = e2evolume.TestConfig{
 			Namespace: ns.Name,
@@ -193,17 +193,17 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		driverInstallAs := driver + "-" + suffix
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
-		installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
+		installFlex(ctx, cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
-		testFlexVolume(driverInstallAs, config, f)
+		testFlexVolume(ctx, driverInstallAs, config, f)
 		ginkgo.By("waiting for flex client pod to terminate")
-		if err := e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, config.Prefix+"-client", "", f.Namespace.Name); !apierrors.IsNotFound(err) {
+		if err := e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, config.Prefix+"-client", "", f.Namespace.Name); !apierrors.IsNotFound(err) {
 			framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
 		}
 		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
-		uninstallFlex(cs, node, "k8s", driverInstallAs)
+		uninstallFlex(ctx, cs, node, "k8s", driverInstallAs)
 	})
 	ginkgo.It("should be mountable when attachable [Feature:Flexvolumes]", func(ctx context.Context) {
@@ -211,14 +211,14 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		driverInstallAs := driver + "-" + suffix
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
-		installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
+		installFlex(ctx, cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
-		installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
+		installFlex(ctx, cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
-		testFlexVolume(driverInstallAs, config, f)
+		testFlexVolume(ctx, driverInstallAs, config, f)
 		ginkgo.By("waiting for flex client pod to terminate")
-		if err := e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, config.Prefix+"-client", "", f.Namespace.Name); !apierrors.IsNotFound(err) {
+		if err := e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, config.Prefix+"-client", "", f.Namespace.Name); !apierrors.IsNotFound(err) {
 			framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
 		}
@@ -226,8 +226,8 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		time.Sleep(detachTimeout)
 		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
-		uninstallFlex(cs, node, "k8s", driverInstallAs)
+		uninstallFlex(ctx, cs, node, "k8s", driverInstallAs)
 		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
-		uninstallFlex(cs, nil, "k8s", driverInstallAs)
+		uninstallFlex(ctx, cs, nil, "k8s", driverInstallAs)
 	})
 })
diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go
index 74df0c56243..8aa87eb6534 100644
--- a/test/e2e/storage/flexvolume_mounted_volume_resize.go
+++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go
@@ -62,16 +62,16 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 	f := framework.NewDefaultFramework("mounted-flexvolume-expand")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("aws", "gce", "local")
 		e2eskipper.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
 		e2eskipper.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
 		e2eskipper.SkipUnlessSSHKeyPresent()
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
-		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
 		nodeName = node.Name
@@ -89,7 +89,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 			Provisioner: "flex-expand",
 		}
-		resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing"), metav1.CreateOptions{})
+		resizableSc, err = c.StorageV1().StorageClasses().Create(ctx, newStorageClass(test, ns, "resizing"), metav1.CreateOptions{})
 		if err != nil {
 			fmt.Printf("storage class creation error: %v\n", err)
 		}
@@ -102,11 +102,11 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 			StorageClassName: &(resizableSc.Name),
 			ClaimSize: "2Gi",
 		}, ns)
-		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
+		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Error creating pvc")
 		ginkgo.DeferCleanup(func(ctx context.Context) {
 			framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
-			if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
+			if errs := e2epv.PVPVCCleanup(ctx, c, ns, nil, pvc); len(errs) > 0 {
 				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 			}
 		})
@@ -115,9 +115,9 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 	ginkgo.It("Should verify mounted flex volumes can be resized", func(ctx context.Context) {
 		driver := "dummy-attachable"
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
-		installFlex(c, node, "k8s", driver, path.Join(driverDir, driver))
+		installFlex(ctx, c, node, "k8s", driver, path.Join(driverDir, driver))
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
-		installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
+		installFlex(ctx, c, nil, "k8s", driver, path.Join(driverDir, driver))
 		pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
 			PVSource: v1.PersistentVolumeSource{
@@ -129,25 +129,25 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 			VolumeMode: pvc.Spec.VolumeMode,
 		})
-		_, err = e2epv.CreatePV(c, f.Timeouts, pv)
+		_, err = e2epv.CreatePV(ctx, c, f.Timeouts, pv)
 		framework.ExpectNoError(err, "Error creating pv %v", err)
 		ginkgo.By("Waiting for PVC to be in bound phase")
 		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
 		var pvs []*v1.PersistentVolume
-		pvs, err = e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
+		pvs, err = e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
 		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 		framework.ExpectEqual(len(pvs), 1)
 		ginkgo.By("Creating a deployment with the provisioned volume")
-		deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
+		deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
 		framework.ExpectNoError(err, "Failed creating deployment %v", err)
 		ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})
 		ginkgo.By("Expanding current pvc")
 		newSize := resource.MustParse("6Gi")
-		newPVC, err := testsuites.ExpandPVCSize(pvc, newSize, c)
+		newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, c)
 		framework.ExpectNoError(err, "While updating pvc for more size")
 		pvc = newPVC
 		gomega.Expect(pvc).NotTo(gomega.BeNil())
@@ -158,25 +158,25 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 		}
 		ginkgo.By("Waiting for cloudprovider resize to finish")
-		err = testsuites.WaitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
+		err = testsuites.WaitForControllerVolumeResize(ctx, pvc, c, totalResizeWaitPeriod)
 		framework.ExpectNoError(err, "While waiting for pvc resize to finish")
 		ginkgo.By("Getting a pod from deployment")
-		podList, err := e2edeployment.GetPodsForDeployment(c, deployment)
+		podList, err := e2edeployment.GetPodsForDeployment(ctx, c, deployment)
 		framework.ExpectNoError(err, "While getting pods from deployment")
 		gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
 		pod := podList.Items[0]
 		ginkgo.By("Deleting the pod from deployment")
-		err = e2epod.DeletePodWithWait(c, &pod)
+		err = e2epod.DeletePodWithWait(ctx, c, &pod)
 		framework.ExpectNoError(err, "while deleting pod for resizing")
 		ginkgo.By("Waiting for deployment to create new pod")
-		pod, err = waitForDeploymentToRecreatePod(c, deployment)
+		pod, err = waitForDeploymentToRecreatePod(ctx, c, deployment)
 		framework.ExpectNoError(err, "While waiting for pod to be recreated")
 		ginkgo.By("Waiting for file system resize to finish")
-		pvc, err = testsuites.WaitForFSResize(pvc, c)
+		pvc, err = testsuites.WaitForFSResize(ctx, pvc, c)
 		framework.ExpectNoError(err, "while waiting for fs resize to finish")
 		pvcConditions := pvc.Status.Conditions
diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go
index 7cae7942373..18d86eb7426 100644
--- a/test/e2e/storage/flexvolume_online_resize.go
+++ b/test/e2e/storage/flexvolume_online_resize.go
@@ -56,17 +56,17 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 	f := framework.NewDefaultFramework("mounted-flexvolume-expand")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("aws", "gce", "local")
 		e2eskipper.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
 		e2eskipper.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
 		e2eskipper.SkipUnlessSSHKeyPresent()
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
 		var err error
-		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
 		nodeName = node.Name
@@ -84,7 +84,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 			Provisioner: "flex-expand",
 		}
-		resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing"), metav1.CreateOptions{})
+		resizableSc, err = c.StorageV1().StorageClasses().Create(ctx, newStorageClass(test, ns, "resizing"), metav1.CreateOptions{})
 		if err != nil {
 			fmt.Printf("storage class creation error: %v\n", err)
 		}
@@ -97,11 +97,11 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 			StorageClassName: &(resizableSc.Name),
 			ClaimSize: "2Gi",
 		}, ns)
-		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
+		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Error creating pvc: %v", err)
 		ginkgo.DeferCleanup(func(ctx context.Context) {
 			framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
-			if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
+			if errs := e2epv.PVPVCCleanup(ctx, c, ns, nil, pvc); len(errs) > 0 {
 				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 			}
 		})
@@ -113,9 +113,9 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 		driver := "dummy-attachable"
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
-		installFlex(c, node, "k8s", driver, path.Join(driverDir, driver))
+		installFlex(ctx, c, node, "k8s", driver, path.Join(driverDir, driver))
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
-		installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
+		installFlex(ctx, c, nil, "k8s", driver, path.Join(driverDir, driver))
 		pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
 			PVSource: v1.PersistentVolumeSource{
@@ -127,30 +127,30 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 			VolumeMode: pvc.Spec.VolumeMode,
 		})
-		_, err = e2epv.CreatePV(c, f.Timeouts, pv)
+		_, err = e2epv.CreatePV(ctx, c, f.Timeouts, pv)
 		framework.ExpectNoError(err, "Error creating pv %v", err)
 		ginkgo.By("Waiting for PVC to be in bound phase")
 		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
 		var pvs []*v1.PersistentVolume
-		pvs, err = e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
+		pvs, err = e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
 		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 		framework.ExpectEqual(len(pvs), 1)
 		var pod *v1.Pod
 		ginkgo.By("Creating pod")
-		pod, err = createNginxPod(c, ns, nodeKeyValueLabel, pvcClaims)
+		pod, err = createNginxPod(ctx, c, ns, nodeKeyValueLabel, pvcClaims)
 		framework.ExpectNoError(err, "Failed to create pod %v", err)
 		ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, pod)
 		ginkgo.By("Waiting for pod to go to 'running' state")
-		err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name)
+		err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name)
 		framework.ExpectNoError(err, "Pod didn't go to 'running' state %v", err)
 		ginkgo.By("Expanding current pvc")
 		newSize := resource.MustParse("6Gi")
-		newPVC, err := testsuites.ExpandPVCSize(pvc, newSize, c)
+		newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, c)
 		framework.ExpectNoError(err, "While updating pvc for more size")
 		pvc = newPVC
 		gomega.Expect(pvc).NotTo(gomega.BeNil())
@@ -161,11 +161,11 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 		}
 		ginkgo.By("Waiting for cloudprovider resize to finish")
-		err = testsuites.WaitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
+		err = testsuites.WaitForControllerVolumeResize(ctx, pvc, c, totalResizeWaitPeriod)
 		framework.ExpectNoError(err, "While waiting for pvc resize to finish")
 		ginkgo.By("Waiting for file system resize to finish")
-		pvc, err = testsuites.WaitForFSResize(pvc, c)
+		pvc, err = testsuites.WaitForFSResize(ctx, pvc, c)
 		framework.ExpectNoError(err, "while waiting for fs resize to finish")
 		pvcConditions := pvc.Status.Conditions
@@ -174,19 +174,19 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 	})
 // createNginxPod creates an nginx pod.
-func createNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
+func createNginxPod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
 	pod := makeNginxPod(namespace, nodeSelector, pvclaims)
-	pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
+	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %v", err)
 	}
 	// Waiting for pod to be running
-	err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
+	err = e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
 	if err != nil {
 		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
 	}
 	// get fresh pod info
-	pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+	pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 	if err != nil {
 		return pod, fmt.Errorf("pod Get API error: %v", err)
 	}
diff --git a/test/e2e/storage/framework/driver_operations.go b/test/e2e/storage/framework/driver_operations.go
index b3326a7fd44..7e63a9e4269 100644
--- a/test/e2e/storage/framework/driver_operations.go
+++ b/test/e2e/storage/framework/driver_operations.go
@@ -17,6 +17,7 @@ limitations under the License.
 package framework
 import (
+	"context"
 	"fmt"
 	storagev1 "k8s.io/api/storage/v1"
@@ -36,11 +37,11 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string {
 }
 // CreateVolume creates volume for test unless dynamicPV or CSI ephemeral inline volume test
-func CreateVolume(driver TestDriver, config *PerTestConfig, volType TestVolType) TestVolume {
+func CreateVolume(ctx context.Context, driver TestDriver, config *PerTestConfig, volType TestVolType) TestVolume {
 	switch volType {
 	case InlineVolume, PreprovisionedPV:
 		if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
-			return pDriver.CreateVolume(config, volType)
+			return pDriver.CreateVolume(ctx, config, volType)
 		}
 	case CSIInlineVolume, GenericEphemeralVolume, DynamicPV:
 		// No need to create volume
diff --git a/test/e2e/storage/framework/snapshot_resource.go b/test/e2e/storage/framework/snapshot_resource.go
index 7d80bd2ac4c..8e07edf1880 100644
--- a/test/e2e/storage/framework/snapshot_resource.go
+++ b/test/e2e/storage/framework/snapshot_resource.go
@@ -45,7 +45,7 @@ type SnapshotResource struct {
 // CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot
 // from the VolumeSnapshotClass using a dynamic client.
 // Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects.
-func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) (*unstructured.Unstructured, *unstructured.Unstructured) {
+func CreateSnapshot(ctx context.Context, sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) (*unstructured.Unstructured, *unstructured.Unstructured) {
 	defer ginkgo.GinkgoRecover()
 	var err error
 	if pattern.SnapshotType != DynamicCreatedSnapshot && pattern.SnapshotType != PreprovisionedCreatedSnapshot {
@@ -55,23 +55,23 @@ func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, patt
 	dc := config.Framework.DynamicClient
 	ginkgo.By("creating a SnapshotClass")
-	sclass := sDriver.GetSnapshotClass(config, parameters)
+	sclass := sDriver.GetSnapshotClass(ctx, config, parameters)
 	if sclass == nil {
 		framework.Failf("Failed to get snapshot class based on test config")
 	}
 	sclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String()
-	sclass, err = dc.Resource(utils.SnapshotClassGVR).Create(context.TODO(), sclass, metav1.CreateOptions{})
+	sclass, err = dc.Resource(utils.SnapshotClassGVR).Create(ctx, sclass, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
-	sclass, err = dc.Resource(utils.SnapshotClassGVR).Get(context.TODO(), sclass.GetName(), metav1.GetOptions{})
+	sclass, err = dc.Resource(utils.SnapshotClassGVR).Get(ctx, sclass.GetName(), metav1.GetOptions{})
 	framework.ExpectNoError(err)
 	ginkgo.By("creating a dynamic VolumeSnapshot")
 	// prepare a dynamically provisioned volume snapshot with certain data
 	snapshot := getSnapshot(pvcName, pvcNamespace, sclass.GetName())
-	snapshot, err = dc.Resource(utils.SnapshotGVR).Namespace(snapshot.GetNamespace()).Create(context.TODO(), snapshot, metav1.CreateOptions{})
+	snapshot, err = dc.Resource(utils.SnapshotGVR).Namespace(snapshot.GetNamespace()).Create(ctx, snapshot, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	return sclass, snapshot
@@ -79,17 +79,17 @@ func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, patt
 // CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
 // different test pattern snapshot provisioning and deletion policy
-func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) *SnapshotResource {
+func CreateSnapshotResource(ctx context.Context, sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) *SnapshotResource {
 	var err error
 	r := SnapshotResource{
 		Config:  config,
 		Pattern: pattern,
 	}
-	r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace, timeouts, parameters)
+	r.Vsclass, r.Vs = CreateSnapshot(ctx, sDriver, config, pattern, pvcName, pvcNamespace, timeouts, parameters)
 	dc := r.Config.Framework.DynamicClient
-	r.Vscontent = utils.GetSnapshotContentFromSnapshot(dc, r.Vs, timeouts.SnapshotCreate)
+	r.Vscontent = utils.GetSnapshotContentFromSnapshot(ctx, dc, r.Vs, timeouts.SnapshotCreate)
 	if pattern.SnapshotType == PreprovisionedCreatedSnapshot {
 		// prepare a pre-provisioned VolumeSnapshotContent with certain data
@@ -101,7 +101,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
 		ginkgo.By("updating the snapshot content deletion policy to retain")
 		r.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Retain"
-		r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), r.Vscontent, metav1.UpdateOptions{})
+		r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(ctx, r.Vscontent, metav1.UpdateOptions{})
 		framework.ExpectNoError(err)
 		ginkgo.By("recording properties of the preprovisioned snapshot")
@@ -117,24 +117,24 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
 		// when the vscontent is manually deleted then the underlying snapshot resource will not be deleted.
 		// We exploit this to create a snapshot resource from which we can create a preprovisioned snapshot
 		ginkgo.By("deleting the snapshot and snapshot content")
-		err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Delete(context.TODO(), r.Vs.GetName(), metav1.DeleteOptions{})
+		err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Delete(ctx, r.Vs.GetName(), metav1.DeleteOptions{})
 		if apierrors.IsNotFound(err) {
 			err = nil
 		}
 		framework.ExpectNoError(err)
 		ginkgo.By("checking the Snapshot has been deleted")
-		err = utils.WaitForNamespacedGVRDeletion(dc, utils.SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
+		err = utils.WaitForNamespacedGVRDeletion(ctx, dc, utils.SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
 		framework.ExpectNoError(err)
-		err = dc.Resource(utils.SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{})
+		err = dc.Resource(utils.SnapshotContentGVR).Delete(ctx, r.Vscontent.GetName(), metav1.DeleteOptions{})
 		if apierrors.IsNotFound(err) {
 			err = nil
 		}
 		framework.ExpectNoError(err)
 		ginkgo.By("checking the Snapshot content has been deleted")
-		err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
+		err = utils.WaitForGVRDeletion(ctx, dc, utils.SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
 		framework.ExpectNoError(err)
 		ginkgo.By("creating a snapshot content with the snapshot handle")
@@ -144,29 +144,29 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
 		snapcontentName := getPreProvisionedSnapshotContentName(uuid)
 		r.Vscontent = getPreProvisionedSnapshotContent(snapcontentName, snapshotContentAnnotations, snapName, pvcNamespace, snapshotHandle, pattern.SnapshotDeletionPolicy.String(), csiDriverName)
-		r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Create(context.TODO(), r.Vscontent, metav1.CreateOptions{})
+		r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Create(ctx, r.Vscontent, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		ginkgo.By("creating a snapshot with that snapshot content")
 		r.Vs = getPreProvisionedSnapshot(snapName, pvcNamespace, snapcontentName)
-		r.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{})
+		r.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(ctx, r.Vs, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
-		err = utils.WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate)
+		err = utils.WaitForSnapshotReady(ctx, dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate)
 		framework.ExpectNoError(err)
 		ginkgo.By("getting the snapshot and snapshot content")
-		r.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Get(context.TODO(), r.Vs.GetName(), metav1.GetOptions{})
+		r.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Get(ctx, r.Vs.GetName(), metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), r.Vscontent.GetName(), metav1.GetOptions{})
+		r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Get(ctx, r.Vscontent.GetName(), metav1.GetOptions{})
 		framework.ExpectNoError(err)
 	}
 	return &r
 }
 // CleanupResource cleans up the snapshot resource and ignores not found errors
-func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext) error {
+func (sr *SnapshotResource) CleanupResource(ctx context.Context, timeouts *framework.TimeoutContext) error {
 	var err error
 	var cleanupErrs []error
@@ -175,7 +175,7 @@ func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext)
 	if sr.Vs != nil {
 		framework.Logf("deleting snapshot %q/%q", sr.Vs.GetNamespace(), sr.Vs.GetName())
-		sr.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Get(context.TODO(), sr.Vs.GetName(), metav1.GetOptions{})
+		sr.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Get(ctx, sr.Vs.GetName(), metav1.GetOptions{})
 		switch {
 		case err == nil:
 			snapshotStatus := sr.Vs.Object["status"].(map[string]interface{})
@@ -183,7 +183,7 @@ func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext)
 			framework.Logf("received snapshotStatus %v", snapshotStatus)
 			framework.Logf("snapshotContentName %s", snapshotContentName)
-			boundVsContent, err := dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
+			boundVsContent, err := dc.Resource(utils.SnapshotContentGVR).Get(ctx, snapshotContentName, metav1.GetOptions{})
 			switch {
 			case err == nil:
 				if boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" {
@@ -191,27 +191,27 @@ func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext)
 					// We must update the SnapshotContent to have Delete Deletion policy,
 					// or else the physical snapshot content will be leaked.
 					boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete"
-					boundVsContent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), boundVsContent, metav1.UpdateOptions{})
+					boundVsContent, err = dc.Resource(utils.SnapshotContentGVR).Update(ctx, boundVsContent, metav1.UpdateOptions{})
 					framework.ExpectNoError(err)
 				}
-				err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{})
+				err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(ctx, sr.Vs.GetName(), metav1.DeleteOptions{})
 				if apierrors.IsNotFound(err) {
 					err = nil
 				}
 				framework.ExpectNoError(err)
-				err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete)
+				err = utils.WaitForGVRDeletion(ctx, dc, utils.SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete)
 				framework.ExpectNoError(err)
 			case apierrors.IsNotFound(err):
 				// the volume snapshot is not bound to snapshot content yet
-				err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{})
+				err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(ctx, sr.Vs.GetName(), metav1.DeleteOptions{})
 				if apierrors.IsNotFound(err) {
 					err = nil
 				}
 				framework.ExpectNoError(err)
-				err = utils.WaitForNamespacedGVRDeletion(dc, utils.SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
+				err = utils.WaitForNamespacedGVRDeletion(ctx, dc, utils.SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
 				framework.ExpectNoError(err)
 			default:
 				cleanupErrs = append(cleanupErrs, err)
@@ -225,7 +225,7 @@ func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext)
 	if sr.Vscontent != nil {
 		framework.Logf("deleting snapshot content %q", sr.Vscontent.GetName())
-		sr.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), sr.Vscontent.GetName(), metav1.GetOptions{})
+		sr.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Get(ctx, sr.Vscontent.GetName(), metav1.GetOptions{})
 		switch {
 		case err == nil:
 			if sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" {
@@ -233,16 +233,16 @@ func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext)
 				// We must update the SnapshotContent to have Delete Deletion policy,
 				// or else the physical snapshot content will be leaked.
 				sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete"
-				sr.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), sr.Vscontent, metav1.UpdateOptions{})
+				sr.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(ctx, sr.Vscontent, metav1.UpdateOptions{})
 				framework.ExpectNoError(err)
 			}
-			err = dc.Resource(utils.SnapshotContentGVR).Delete(context.TODO(), sr.Vscontent.GetName(), metav1.DeleteOptions{})
+			err = dc.Resource(utils.SnapshotContentGVR).Delete(ctx, sr.Vscontent.GetName(), metav1.DeleteOptions{})
 			if apierrors.IsNotFound(err) {
 				err = nil
 			}
 			framework.ExpectNoError(err)
-			err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
+			err = utils.WaitForGVRDeletion(ctx, dc, utils.SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
 			framework.ExpectNoError(err)
 		case apierrors.IsNotFound(err):
 			// Hope the underlying physical snapshot resource has been deleted already
@@ -253,11 +253,11 @@ func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext)
 	if sr.Vsclass != nil {
 		framework.Logf("deleting snapshot class %q", sr.Vsclass.GetName())
 		// typically this snapshot class has already been deleted
-		err = dc.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), sr.Vsclass.GetName(), metav1.DeleteOptions{})
+		err = dc.Resource(utils.SnapshotClassGVR).Delete(ctx, sr.Vsclass.GetName(), metav1.DeleteOptions{})
 		if err != nil && !apierrors.IsNotFound(err) {
 			framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err)
 		}
-		err = utils.WaitForGVRDeletion(dc, utils.SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete)
+		err = utils.WaitForGVRDeletion(ctx, dc, utils.SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete)
 		framework.ExpectNoError(err)
 	}
 	return utilerrors.NewAggregate(cleanupErrs)
diff --git a/test/e2e/storage/framework/testdriver.go b/test/e2e/storage/framework/testdriver.go
index 5040071fd0c..ea6b80847a3 100644
--- a/test/e2e/storage/framework/testdriver.go
+++ b/test/e2e/storage/framework/testdriver.go
@@ -17,6 +17,7 @@ limitations under the License.
 package framework
 import (
+	"context"
 	"time"
 	v1 "k8s.io/api/core/v1"
@@ -49,21 +50,21 @@ type TestDriver interface {
 	// PrepareTest is called at test execution time each time a new test case is about to start.
 	// It sets up all necessary resources and returns the per-test configuration.
 	// Cleanup is handled via ginkgo.DeferCleanup inside PrepareTest.
-	PrepareTest(f *framework.Framework) *PerTestConfig
+	PrepareTest(ctx context.Context, f *framework.Framework) *PerTestConfig
 }
 // TestVolume is the result of PreprovisionedVolumeTestDriver.CreateVolume.
 // The only common functionality is to delete it. Individual driver interfaces
 // have additional methods that work with volumes created by them.
 type TestVolume interface {
-	DeleteVolume()
+	DeleteVolume(ctx context.Context)
 }
 // PreprovisionedVolumeTestDriver represents an interface for a TestDriver that has pre-provisioned volume
 type PreprovisionedVolumeTestDriver interface {
 	TestDriver
 	// CreateVolume creates a pre-provisioned volume of the desired volume type.
-	CreateVolume(config *PerTestConfig, volumeType TestVolType) TestVolume
+	CreateVolume(ctx context.Context, config *PerTestConfig, volumeType TestVolType) TestVolume
 }
 // InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume
@@ -89,12 +90,12 @@ type PreprovisionedPVTestDriver interface {
 type DynamicPVTestDriver interface {
 	TestDriver
 	// GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume.
-	// The StorageClass must be created in the current test's namespace and have
-	// a unique name inside that namespace because GetDynamicProvisionStorageClass might
+	// The StorageClass must have
+	// a unique name because GetDynamicProvisionStorageClass might
 	// be called more than once per test.
 	// It will set fsType to the StorageClass, if TestDriver supports it.
 	// It will return nil, if the TestDriver doesn't support it.
-	GetDynamicProvisionStorageClass(config *PerTestConfig, fsType string) *storagev1.StorageClass
+	GetDynamicProvisionStorageClass(ctx context.Context, config *PerTestConfig, fsType string) *storagev1.StorageClass
 }
 // EphemeralTestDriver represents an interface for a TestDriver that supports ephemeral inline volumes.
@@ -126,7 +127,7 @@ type SnapshottableTestDriver interface {
 	TestDriver
 	// GetSnapshotClass returns a SnapshotClass to create snapshot.
 	// It will return nil, if the TestDriver doesn't support it.
-	GetSnapshotClass(config *PerTestConfig, parameters map[string]string) *unstructured.Unstructured
+	GetSnapshotClass(ctx context.Context, config *PerTestConfig, parameters map[string]string) *unstructured.Unstructured
 }
 // CustomTimeoutsTestDriver represents an interface fo a TestDriver that supports custom timeouts.
diff --git a/test/e2e/storage/framework/volume_resource.go b/test/e2e/storage/framework/volume_resource.go
index dbce2505cbe..12ce1872e7c 100644
--- a/test/e2e/storage/framework/volume_resource.go
+++ b/test/e2e/storage/framework/volume_resource.go
@@ -52,12 +52,12 @@ type VolumeResource struct {
 // CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
 // different test pattern volume types.
-func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
-	return CreateVolumeResourceWithAccessModes(driver, config, pattern, testVolumeSizeRange, driver.GetDriverInfo().RequiredAccessModes)
+func CreateVolumeResource(ctx context.Context, driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
+	return CreateVolumeResourceWithAccessModes(ctx, driver, config, pattern, testVolumeSizeRange, driver.GetDriverInfo().RequiredAccessModes)
 }
 // CreateVolumeResourceWithAccessModes constructs a VolumeResource for the current test with the provided access modes.
-func CreateVolumeResourceWithAccessModes(driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange, accessModes []v1.PersistentVolumeAccessMode) *VolumeResource { +func CreateVolumeResourceWithAccessModes(ctx context.Context, driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange, accessModes []v1.PersistentVolumeAccessMode) *VolumeResource { r := VolumeResource{ Config: config, Pattern: pattern, @@ -67,7 +67,7 @@ func CreateVolumeResourceWithAccessModes(driver TestDriver, config *PerTestConfi cs := f.ClientSet // Create volume for pre-provisioned volume tests - r.Volume = CreateVolume(driver, config, pattern.VolType) + r.Volume = CreateVolume(ctx, driver, config, pattern.VolType) switch pattern.VolType { case InlineVolume: @@ -80,7 +80,7 @@ func CreateVolumeResourceWithAccessModes(driver TestDriver, config *PerTestConfi if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume) if pvSource != nil { - r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, accessModes) + r.Pv, r.Pvc = createPVCPV(ctx, f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, accessModes) r.VolSource = storageutils.CreateVolumeSource(r.Pvc.Name, false /* readOnly */) } } @@ -92,7 +92,7 @@ func CreateVolumeResourceWithAccessModes(driver TestDriver, config *PerTestConfi claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ", claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, testVolumeSizeRange) - r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType) + r.Sc = dDriver.GetDynamicProvisionStorageClass(ctx, r.Config, pattern.FsType) if pattern.BindingMode != "" { r.Sc.VolumeBindingMode = &pattern.BindingMode @@ -101,13 +101,13 @@ func CreateVolumeResourceWithAccessModes(driver TestDriver, config *PerTestConfi ginkgo.By("creating a StorageClass " + r.Sc.Name) - r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{}) + r.Sc, err = cs.StorageV1().StorageClasses().Create(ctx, r.Sc, metav1.CreateOptions{}) framework.ExpectNoError(err) switch pattern.VolType { case DynamicPV: r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC( - f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, accessModes) + ctx, f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, accessModes) r.VolSource = storageutils.CreateVolumeSource(r.Pvc.Name, false /* readOnly */) case GenericEphemeralVolume: driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange @@ -167,14 +167,14 @@ func createEphemeralVolumeSource(scName string, volMode v1.PersistentVolumeMode, } // CleanupResource cleans up VolumeResource -func (r *VolumeResource) CleanupResource() error { +func (r *VolumeResource) CleanupResource(ctx context.Context) error { f := r.Config.Framework var cleanUpErrs []error if r.Pvc != nil || r.Pv != nil { switch r.Pattern.VolType { case PreprovisionedPV: ginkgo.By("Deleting pv and pvc") - if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 { + if errs := e2epv.PVPVCCleanup(ctx, f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) 
!= 0 { framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) } case DynamicPV: @@ -189,11 +189,11 @@ func (r *VolumeResource) CleanupResource() error { pv := r.Pv if pv == nil && r.Pvc.Name != "" { // This happens for late binding. Check whether we have a volume now that we need to wait for. - pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(context.TODO(), r.Pvc.Name, metav1.GetOptions{}) + pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(ctx, r.Pvc.Name, metav1.GetOptions{}) switch { case err == nil: if pvc.Spec.VolumeName != "" { - pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + pv, err = cs.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { cleanUpErrs = append(cleanUpErrs, fmt.Errorf("failed to find PV %v: %w", pvc.Spec.VolumeName, err)) } @@ -206,13 +206,13 @@ func (r *VolumeResource) CleanupResource() error { } } - err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name) + err := e2epv.DeletePersistentVolumeClaim(ctx, f.ClientSet, r.Pvc.Name, f.Namespace.Name) if err != nil { cleanUpErrs = append(cleanUpErrs, fmt.Errorf("failed to delete PVC %v: %w", r.Pvc.Name, err)) } if pv != nil { - err = e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pv.Name, 5*time.Second, f.Timeouts.PVDelete) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, f.ClientSet, pv.Name, 5*time.Second, f.Timeouts.PVDelete) if err != nil { cleanUpErrs = append(cleanUpErrs, fmt.Errorf( "persistent Volume %v not deleted by dynamic provisioner: %w", pv.Name, err)) @@ -226,14 +226,16 @@ func (r *VolumeResource) CleanupResource() error { if r.Sc != nil { ginkgo.By("Deleting sc") - if err := storageutils.DeleteStorageClass(f.ClientSet, r.Sc.Name); err != nil { + if err := storageutils.DeleteStorageClass(ctx, f.ClientSet, r.Sc.Name); err != nil { cleanUpErrs = append(cleanUpErrs, fmt.Errorf("failed to delete StorageClass %v: %w", r.Sc.Name, err)) } } // Cleanup volume for pre-provisioned volume tests if r.Volume != nil { - if err := storageutils.TryFunc(r.Volume.DeleteVolume); err != nil { + if err := storageutils.TryFunc(func() { + r.Volume.DeleteVolume(ctx) + }); err != nil { cleanUpErrs = append(cleanUpErrs, fmt.Errorf("failed to delete Volume: %w", err)) } } @@ -241,6 +243,7 @@ func (r *VolumeResource) CleanupResource() error { } func createPVCPV( + ctx context.Context, f *framework.Framework, name string, pvSource *v1.PersistentVolumeSource, @@ -267,16 +270,17 @@ func createPVCPV( } framework.Logf("Creating PVC and PV") - pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, f.Timeouts, pvConfig, pvcConfig, f.Namespace.Name, false) + pv, pvc, err := e2epv.CreatePVCPV(ctx, f.ClientSet, f.Timeouts, pvConfig, pvcConfig, f.Namespace.Name, false) framework.ExpectNoError(err, "PVC, PV creation failed") - err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc) + err = e2epv.WaitOnPVandPVC(ctx, f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc) framework.ExpectNoError(err, "PVC, PV failed to bind") return pv, pvc } func createPVCPVFromDynamicProvisionSC( + ctx context.Context, f *framework.Framework, name string, claimSize string, @@ -299,20 +303,20 @@ func createPVCPVFromDynamicProvisionSC( pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns) var err error - pvc, err = e2epv.CreatePVC(cs, ns, pvc) + pvc, err = e2epv.CreatePVC(ctx, cs, ns, pvc) framework.ExpectNoError(err) if !isDelayedBinding(sc) { - err = 
e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision) framework.ExpectNoError(err) } - pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) var pv *v1.PersistentVolume if !isDelayedBinding(sc) { - pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + pv, err = cs.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go index 79ca3109825..b7c82a110b7 100644 --- a/test/e2e/storage/generic_persistent_volume-disruptive.go +++ b/test/e2e/storage/generic_persistent_volume-disruptive.go @@ -72,29 +72,29 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { pvc *v1.PersistentVolumeClaim pv *v1.PersistentVolume ) - ginkgo.BeforeEach(func() { - e2epv.SkipIfNoDefaultStorageClass(c) + ginkgo.BeforeEach(func(ctx context.Context) { + e2epv.SkipIfNoDefaultStorageClass(ctx, c) framework.Logf("Initializing pod and pvcs for test") - clientPod, pvc, pv = createPodPVCFromSC(f, c, ns) + clientPod, pvc, pv = createPodPVCFromSC(ctx, f, c, ns) }) for _, test := range disruptiveTestTable { func(t disruptiveTest) { ginkgo.It(t.testItStmt, func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() ginkgo.By("Executing Spec") - t.runTest(c, f, clientPod, e2epod.VolumeMountPath1) + t.runTest(ctx, c, f, clientPod, e2epod.VolumeMountPath1) }) }(test) } - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { framework.Logf("Tearing down test spec") - tearDownTestCase(c, f, ns, clientPod, pvc, pv, false) + tearDownTestCase(ctx, c, f, ns, clientPod, pvc, pv, false) pvc, clientPod = nil, nil }) }) }) -func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim, *v1.PersistentVolume) { +func createPodPVCFromSC(ctx context.Context, f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim, *v1.PersistentVolume) { var err error test := testsuites.StorageClassTest{ Name: "default", @@ -105,10 +105,10 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pvc") pvcClaims := []*v1.PersistentVolumeClaim{pvc} - pvs, err := e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) + pvs, err := e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectEqual(len(pvs), 1) @@ -118,7 +118,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string PVCs: pvcClaims, SeLinuxLabel: e2epv.SELinuxLabel, } - pod, err := e2epod.CreateSecPod(c, &podConfig, 
f.Timeouts.PodStart) + pod, err := e2epod.CreateSecPod(ctx, c, &podConfig, f.Timeouts.PodStart) framework.ExpectNoError(err, "While creating pods for kubelet restart test") return pod, pvc, pvs[0] } diff --git a/test/e2e/storage/gke_local_ssd.go b/test/e2e/storage/gke_local_ssd.go index 36987ba33b0..263f3c573cc 100644 --- a/test/e2e/storage/gke_local_ssd.go +++ b/test/e2e/storage/gke_local_ssd.go @@ -45,7 +45,7 @@ var _ = utils.SIGDescribe("GKE local SSD [Feature:GKELocalSSD]", func() { ginkgo.It("should write and read from node local SSD [Feature:GKELocalSSD]", func(ctx context.Context) { framework.Logf("Start local SSD test") createNodePoolWithLocalSsds("np-ssd") - doTestWriteAndReadToLocalSsd(f) + doTestWriteAndReadToLocalSsd(ctx, f) }) }) @@ -62,12 +62,12 @@ func createNodePoolWithLocalSsds(nodePoolName string) { framework.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out)) } -func doTestWriteAndReadToLocalSsd(f *framework.Framework) { +func doTestWriteAndReadToLocalSsd(ctx context.Context, f *framework.Framework) { var pod = testPodWithSsd("echo 'hello world' > /mnt/disks/ssd0/data && sleep 1 && cat /mnt/disks/ssd0/data") var msg string var out = []string{"hello world"} - e2eoutput.TestContainerOutput(f, msg, pod, 0, out) + e2eoutput.TestContainerOutput(ctx, f, msg, pod, 0, out) } func testPodWithSsd(command string) *v1.Pod { diff --git a/test/e2e/storage/host_path_type.go b/test/e2e/storage/host_path_type.go index 9369af59497..c2e18bff22e 100644 --- a/test/e2e/storage/host_path_type.go +++ b/test/e2e/storage/host_path_type.go @@ -56,50 +56,50 @@ var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() { hostPathBlockDev = v1.HostPathBlockDev ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns = f.Namespace.Name ginkgo.By("Create a pod for further testing") hostBaseDir = path.Join("/tmp", ns) mountBaseDir = "/mnt/test" - basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) + basePod = e2epod.NewPodClient(f).CreateSync(ctx, newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName)) targetDir = path.Join(hostBaseDir, "adir") ginkgo.By("Should automatically create a new directory 'adir' when HostPathType is HostPathDirectoryOrCreate") - verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, &hostPathDirectoryOrCreate) + verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, &hostPathDirectoryOrCreate) }) ginkgo.It("Should fail on mounting non-existent directory 'does-not-exist-dir' when HostPathType is HostPathDirectory", func(ctx context.Context) { dirPath := path.Join(hostBaseDir, "does-not-exist-dir") - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, dirPath, fmt.Sprintf("%s is not a directory", dirPath), &hostPathDirectory) }) ginkgo.It("Should be able to mount directory 'adir' successfully when HostPathType is HostPathDirectory", func(ctx context.Context) { - verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, &hostPathDirectory) + verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": 
basePod.Spec.NodeName}, targetDir, &hostPathDirectory) }) ginkgo.It("Should be able to mount directory 'adir' successfully when HostPathType is HostPathUnset", func(ctx context.Context) { - verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, &hostPathUnset) + verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, &hostPathUnset) }) ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathFile", func(ctx context.Context) { - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a file", targetDir), &hostPathFile) }) ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathSocket", func(ctx context.Context) { - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a socket", targetDir), &hostPathSocket) }) ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathCharDev", func(ctx context.Context) { - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a character device", targetDir), &hostPathCharDev) }) ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathBlockDev", func(ctx context.Context) { - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a block device", targetDir), &hostPathBlockDev) + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a block device", targetDir), &hostPathBlockDev) }) }) @@ -124,50 +124,50 @@ var _ = utils.SIGDescribe("HostPathType File [Slow]", func() { hostPathBlockDev = v1.HostPathBlockDev ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns = f.Namespace.Name ginkgo.By("Create a pod for further testing") hostBaseDir = path.Join("/tmp", ns) mountBaseDir = "/mnt/test" - basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) + basePod = e2epod.NewPodClient(f).CreateSync(ctx, newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName)) targetFile = path.Join(hostBaseDir, "afile") ginkgo.By("Should automatically create a new file 'afile' when HostPathType is HostPathFileOrCreate") - verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathFileOrCreate) + verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathFileOrCreate) }) ginkgo.It("Should fail on mounting non-existent file 'does-not-exist-file' when HostPathType is HostPathFile", func(ctx context.Context) { filePath := path.Join(hostBaseDir, "does-not-exist-file") - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": 
basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, filePath, fmt.Sprintf("%s is not a file", filePath), &hostPathFile) }) ginkgo.It("Should be able to mount file 'afile' successfully when HostPathType is HostPathFile", func(ctx context.Context) { - verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathFile) + verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathFile) }) ginkgo.It("Should be able to mount file 'afile' successfully when HostPathType is HostPathUnset", func(ctx context.Context) { - verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathUnset) + verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathUnset) }) ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathDirectory", func(ctx context.Context) { - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, fmt.Sprintf("%s is not a directory", targetFile), &hostPathDirectory) }) ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathSocket", func(ctx context.Context) { - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, fmt.Sprintf("%s is not a socket", targetFile), &hostPathSocket) }) ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathCharDev", func(ctx context.Context) { - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, fmt.Sprintf("%s is not a character device", targetFile), &hostPathCharDev) }) ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathBlockDev", func(ctx context.Context) { - verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, + verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, fmt.Sprintf("%s is not a block device", targetFile), &hostPathBlockDev) }) }) @@ -192,48 +192,48 @@ var _ = utils.SIGDescribe("HostPathType Socket [Slow]", func() { hostPathBlockDev = v1.HostPathBlockDev ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns = f.Namespace.Name ginkgo.By("Create a pod for further testing") hostBaseDir = path.Join("/tmp", ns) mountBaseDir = "/mnt/test" - basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPodWithCommand(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate, fmt.Sprintf("nc -lU %s", path.Join(mountBaseDir, "asocket")))) + basePod = e2epod.NewPodClient(f).CreateSync(ctx, newHostPathTypeTestPodWithCommand(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate, fmt.Sprintf("nc -lU %s", path.Join(mountBaseDir, "asocket")))) ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName)) targetSocket = path.Join(hostBaseDir, "asocket") }) ginkgo.It("Should fail on mounting non-existent socket 
'does-not-exist-socket' when HostPathType is HostPathSocket", func(ctx context.Context) {
 		socketPath := path.Join(hostBaseDir, "does-not-exist-socket")
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			socketPath, fmt.Sprintf("%s is not a socket", socketPath), &hostPathSocket)
 	})
 	ginkgo.It("Should be able to mount socket 'asocket' successfully when HostPathType is HostPathSocket", func(ctx context.Context) {
-		verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, &hostPathSocket)
+		verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, &hostPathSocket)
 	})
 	ginkgo.It("Should be able to mount socket 'asocket' successfully when HostPathType is HostPathUnset", func(ctx context.Context) {
-		verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, &hostPathUnset)
+		verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, &hostPathUnset)
 	})
 	ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathDirectory", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetSocket, fmt.Sprintf("%s is not a directory", targetSocket), &hostPathDirectory)
 	})
 	ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathFile", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetSocket, fmt.Sprintf("%s is not a file", targetSocket), &hostPathFile)
 	})
 	ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathCharDev", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetSocket, fmt.Sprintf("%s is not a character device", targetSocket), &hostPathCharDev)
 	})
 	ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathBlockDev", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetSocket, fmt.Sprintf("%s is not a block device", targetSocket), &hostPathBlockDev)
 	})
 })
@@ -258,13 +258,13 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() {
 		hostPathBlockDev = v1.HostPathBlockDev
 	)
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		ns = f.Namespace.Name
 		ginkgo.By("Create a pod for further testing")
 		hostBaseDir = path.Join("/tmp", ns)
 		mountBaseDir = "/mnt/test"
-		basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
+		basePod = e2epod.NewPodClient(f).CreateSync(ctx, newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
 		ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName))
 		targetCharDev = path.Join(hostBaseDir, "achardev")
 		ginkgo.By("Create a character device for further testing")
@@ -275,35 +275,35 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() {
 	ginkgo.It("Should fail on mounting non-existent character device 'does-not-exist-char-dev' when HostPathType is HostPathCharDev", func(ctx context.Context) {
 		charDevPath := path.Join(hostBaseDir, "does-not-exist-char-dev")
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			charDevPath, fmt.Sprintf("%s is not a character device", charDevPath), &hostPathCharDev)
 	})
 	ginkgo.It("Should be able to mount character device 'achardev' successfully when HostPathType is HostPathCharDev", func(ctx context.Context) {
-		verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, &hostPathCharDev)
+		verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, &hostPathCharDev)
 	})
 	ginkgo.It("Should be able to mount character device 'achardev' successfully when HostPathType is HostPathUnset", func(ctx context.Context) {
-		verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, &hostPathUnset)
+		verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, &hostPathUnset)
 	})
 	ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathDirectory", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetCharDev, fmt.Sprintf("%s is not a directory", targetCharDev), &hostPathDirectory)
 	})
 	ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathFile", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetCharDev, fmt.Sprintf("%s is not a file", targetCharDev), &hostPathFile)
 	})
 	ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathSocket", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetCharDev, fmt.Sprintf("%s is not a socket", targetCharDev), &hostPathSocket)
 	})
 	ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathBlockDev", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetCharDev, fmt.Sprintf("%s is not a block device", targetCharDev), &hostPathBlockDev)
 	})
 })
@@ -328,13 +328,13 @@ var _ = utils.SIGDescribe("HostPathType Block Device [Slow]", func() {
 		hostPathBlockDev = v1.HostPathBlockDev
 	)
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		ns = f.Namespace.Name
 		ginkgo.By("Create a pod for further testing")
 		hostBaseDir = path.Join("/tmp", ns)
 		mountBaseDir = "/mnt/test"
-		basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
+		basePod = e2epod.NewPodClient(f).CreateSync(ctx, newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
 		ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName))
 		targetBlockDev = path.Join(hostBaseDir, "ablkdev")
 		ginkgo.By("Create a block device for further testing")
@@ -345,35 +345,35 @@ var _ = utils.SIGDescribe("HostPathType Block Device [Slow]", func() {
 	ginkgo.It("Should fail on mounting non-existent block device 'does-not-exist-blk-dev' when HostPathType is HostPathBlockDev", func(ctx context.Context) {
 		blkDevPath := path.Join(hostBaseDir, "does-not-exist-blk-dev")
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			blkDevPath, fmt.Sprintf("%s is not a block device", blkDevPath), &hostPathBlockDev)
 	})
 	ginkgo.It("Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathBlockDev", func(ctx context.Context) {
-		verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, &hostPathBlockDev)
+		verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, &hostPathBlockDev)
 	})
 	ginkgo.It("Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathUnset", func(ctx context.Context) {
-		verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, &hostPathUnset)
+		verifyPodHostPathType(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, &hostPathUnset)
 	})
 	ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathDirectory", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetBlockDev, fmt.Sprintf("%s is not a directory", targetBlockDev), &hostPathDirectory)
 	})
 	ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathFile", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetBlockDev, fmt.Sprintf("%s is not a file", targetBlockDev), &hostPathFile)
 	})
 	ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathSocket", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetBlockDev, fmt.Sprintf("%s is not a socket", targetBlockDev), &hostPathSocket)
 	})
 	ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathCharDev", func(ctx context.Context) {
-		verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
+		verifyPodHostPathTypeFailure(ctx, f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName},
 			targetBlockDev, fmt.Sprintf("%s is not a character device", targetBlockDev), &hostPathCharDev)
 	})
 })
@@ -454,10 +454,10 @@ func newHostPathTypeTestPodWithCommand(nodeSelector map[string]string, hostDir,
 	return pod
 }
-func verifyPodHostPathTypeFailure(f *framework.Framework, nodeSelector map[string]string, hostDir, pattern string, hostPathType *v1.HostPathType) {
+func verifyPodHostPathTypeFailure(ctx context.Context, f *framework.Framework, nodeSelector map[string]string, hostDir, pattern string, hostPathType *v1.HostPathType) {
 	pod := newHostPathTypeTestPod(nodeSelector, hostDir, "/mnt/test", hostPathType)
 	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
-	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
+	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	ginkgo.By("Checking for HostPathType error event")
@@ -469,25 +469,25 @@ func verifyPodHostPathTypeFailure(f *framework.Framework, nodeSelector map[strin
 	}.AsSelector().String()
 	msg := "hostPath type check failed"
-	err = e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, msg, f.Timeouts.PodStart)
+	err = e2eevents.WaitTimeoutForEvent(ctx, f.ClientSet, f.Namespace.Name, eventSelector, msg, f.Timeouts.PodStart)
 	// Events are unreliable, don't depend on the event. It's used only to speed up the test.
 	if err != nil {
 		framework.Logf("Warning: did not get event about FailedMountVolume")
 	}
 	// Check the pod is still not running
-	p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+	p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
 	framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
-	f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
+	f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
 }
-func verifyPodHostPathType(f *framework.Framework, nodeSelector map[string]string, hostDir string, hostPathType *v1.HostPathType) {
-	newPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
+func verifyPodHostPathType(ctx context.Context, f *framework.Framework, nodeSelector map[string]string, hostDir string, hostPathType *v1.HostPathType) {
+	newPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx,
 		newHostPathTypeTestPod(nodeSelector, hostDir, "/mnt/test", hostPathType), metav1.CreateOptions{})
 	framework.ExpectNoError(err)
-	framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, newPod.Name, newPod.Namespace, f.Timeouts.PodStart))
+	framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, newPod.Name, newPod.Namespace, f.Timeouts.PodStart))
-	f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), newPod.Name, *metav1.NewDeleteOptions(0))
+	f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, newPod.Name, *metav1.NewDeleteOptions(0))
 }
diff --git a/test/e2e/storage/local_volume_resize.go b/test/e2e/storage/local_volume_resize.go
index 8bb428eaafd..bfd7ec0ca69 100644
--- a/test/e2e/storage/local_volume_resize.go
+++ b/test/e2e/storage/local_volume_resize.go
@@ -55,8 +55,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
 	testVolType := BlockFsWithFormatLocalVolumeType
 	var testVol *localTestVolume
 	testMode := immediateMode
-	ginkgo.BeforeEach(func() {
-		nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes)
+	ginkgo.BeforeEach(func(ctx context.Context) {
+		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, maxNodes)
 		framework.ExpectNoError(err)
 		scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name)
@@ -77,13 +77,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
 			ltrMgr: ltrMgr,
 		}
-		setupExpandableLocalStorageClass(config, &testMode)
-		testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.randomNode, 1, testMode)
+		setupExpandableLocalStorageClass(ctx, config, &testMode)
+		testVols := setupLocalVolumesPVCsPVs(ctx, config, testVolType, config.randomNode, 1, testMode)
 		testVol = testVols[0]
 	})
-	ginkgo.AfterEach(func() {
-		cleanupLocalVolumes(config, []*localTestVolume{testVol})
-		cleanupStorageClass(config)
+	ginkgo.AfterEach(func(ctx context.Context) {
+		cleanupLocalVolumes(ctx, config, []*localTestVolume{testVol})
+		cleanupStorageClass(ctx, config)
 	})
 	ginkgo.It("should support online expansion on node", func(ctx context.Context) {
@@ -92,9 +92,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
 			pod1Err error
 		)
 		ginkgo.By("Creating pod1")
-		pod1, pod1Err = createLocalPod(config, testVol, nil)
+		pod1, pod1Err = createLocalPod(ctx, config, testVol, nil)
 		framework.ExpectNoError(pod1Err)
-		verifyLocalPod(config, testVol, pod1, config.randomNode.Name)
+		verifyLocalPod(ctx, config, testVol, pod1, config.randomNode.Name)
 		// We expand the PVC while l.pod is using it for online expansion.
 		ginkgo.By("Expanding current pvc")
@@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
 		newSize := currentPvcSize.DeepCopy()
 		newSize.Add(resource.MustParse("10Mi"))
 		framework.Logf("currentPvcSize %s, newSize %s", currentPvcSize.String(), newSize.String())
-		newPVC, err := testsuites.ExpandPVCSize(testVol.pvc, newSize, f.ClientSet)
+		newPVC, err := testsuites.ExpandPVCSize(ctx, testVol.pvc, newSize, f.ClientSet)
 		framework.ExpectNoError(err, "While updating pvc for more size")
 		testVol.pvc = newPVC
 		gomega.Expect(testVol.pvc).NotTo(gomega.BeNil())
@@ -113,17 +113,17 @@ var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
 		}
 		// Now update the underlying volume manually
-		err = config.ltrMgr.ExpandBlockDevice(testVol.ltr, 10 /*number of 1M blocks to add*/)
+		err = config.ltrMgr.ExpandBlockDevice(ctx, testVol.ltr, 10 /*number of 1M blocks to add*/)
 		framework.ExpectNoError(err, "while expanding loopback device")
 		// now update PV to matching size
-		pv, err := UpdatePVSize(testVol.pv, newSize, f.ClientSet)
+		pv, err := UpdatePVSize(ctx, testVol.pv, newSize, f.ClientSet)
 		framework.ExpectNoError(err, "while updating pv to more size")
 		gomega.Expect(pv).NotTo(gomega.BeNil())
 		testVol.pv = pv
 		ginkgo.By("Waiting for file system resize to finish")
-		testVol.pvc, err = testsuites.WaitForFSResize(testVol.pvc, f.ClientSet)
+		testVol.pvc, err = testsuites.WaitForFSResize(ctx, testVol.pvc, f.ClientSet)
 		framework.ExpectNoError(err, "while waiting for fs resize to finish")
 		pvcConditions := testVol.pvc.Status.Conditions
@@ -134,19 +134,19 @@ var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
 	})
-func UpdatePVSize(pv *v1.PersistentVolume, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolume, error) {
+func UpdatePVSize(ctx context.Context, pv *v1.PersistentVolume, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolume, error) {
 	pvName := pv.Name
 	pvToUpdate := pv.DeepCopy()
 	var lastError error
-	waitErr := wait.PollImmediate(5*time.Second, csiResizeWaitPeriod, func() (bool, error) {
+	waitErr := wait.PollImmediateWithContext(ctx, 5*time.Second, csiResizeWaitPeriod, func(ctx context.Context) (bool, error) {
 		var err error
-		pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
+		pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
 		if err != nil {
 			return false, fmt.Errorf("error fetching pv %s: %v", pvName, err)
 		}
 		pvToUpdate.Spec.Capacity[v1.ResourceStorage] = size
-		pvToUpdate, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pvToUpdate, metav1.UpdateOptions{})
+		pvToUpdate, err = c.CoreV1().PersistentVolumes().Update(ctx, pvToUpdate, metav1.UpdateOptions{})
 		if err != nil {
 			framework.Logf("error updating PV %s: %v", pvName, err)
 			lastError = err
@@ -163,7 +163,7 @@ func UpdatePVSize(pv *v1.PersistentVolume, size resource.Quantity, c clientset.I
 	return pvToUpdate, nil
 }
-func setupExpandableLocalStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMode) {
+func setupExpandableLocalStorageClass(ctx context.Context, config *localTestConfig, mode *storagev1.VolumeBindingMode) {
 	enableExpansion := true
 	sc := &storagev1.StorageClass{
 		ObjectMeta: metav1.ObjectMeta{
@@ -174,6 +174,6 @@ func setupExpandableLocalStorageClass(config *localTestConfig, mode *storagev1.V
 		AllowVolumeExpansion: &enableExpansion,
 	}
-	_, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
+	_, err := config.client.StorageV1().StorageClasses().Create(ctx, sc, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 }
diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go
index 5a7f288ea8d..dcce82a77b8 100644
--- a/test/e2e/storage/mounted_volume_resize.go
+++ b/test/e2e/storage/mounted_volume_resize.go
@@ -62,9 +62,9 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 		e2eskipper.SkipUnlessProviderIs("aws", "gce")
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
-		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
 		nodeName = node.Name
@@ -93,12 +93,12 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 			StorageClassName: &(sc.Name),
 			VolumeMode: &test.VolumeMode,
 		}, ns)
-		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
+		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Error creating pvc")
 		ginkgo.DeferCleanup(func(ctx context.Context) {
 			framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
-			if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
+			if errs := e2epv.PVPVCCleanup(ctx, c, ns, nil, pvc); len(errs) > 0 {
 				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 			}
 		})
@@ -111,19 +111,19 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 		// Keeping pod on same node reproduces the scenario that volume might already be mounted when resize is attempted.
 		// We should consider adding a unit test that exercises this better.
 		ginkgo.By("Creating a deployment with selected PVC")
-		deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
+		deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
 		framework.ExpectNoError(err, "Failed creating deployment %v", err)
 		ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})
 		// PVC should be bound at this point
 		ginkgo.By("Checking for bound PVC")
-		pvs, err := e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
+		pvs, err := e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
 		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 		framework.ExpectEqual(len(pvs), 1)
 		ginkgo.By("Expanding current pvc")
 		newSize := resource.MustParse("6Gi")
-		newPVC, err := testsuites.ExpandPVCSize(pvc, newSize, c)
+		newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, c)
 		framework.ExpectNoError(err, "While updating pvc for more size")
 		pvc = newPVC
 		gomega.Expect(pvc).NotTo(gomega.BeNil())
@@ -134,25 +134,25 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 		}
 		ginkgo.By("Waiting for cloudprovider resize to finish")
-		err = testsuites.WaitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
+		err = testsuites.WaitForControllerVolumeResize(ctx, pvc, c, totalResizeWaitPeriod)
 		framework.ExpectNoError(err, "While waiting for pvc resize to finish")
 		ginkgo.By("Getting a pod from deployment")
-		podList, err := e2edeployment.GetPodsForDeployment(c, deployment)
+		podList, err := e2edeployment.GetPodsForDeployment(ctx, c, deployment)
 		framework.ExpectNoError(err, "While getting pods from deployment")
 		gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
 		pod := podList.Items[0]
 		ginkgo.By("Deleting the pod from deployment")
-		err = e2epod.DeletePodWithWait(c, &pod)
+		err = e2epod.DeletePodWithWait(ctx, c, &pod)
 		framework.ExpectNoError(err, "while deleting pod for resizing")
 		ginkgo.By("Waiting for deployment to create new pod")
-		pod, err = waitForDeploymentToRecreatePod(c, deployment)
+		pod, err = waitForDeploymentToRecreatePod(ctx, c, deployment)
 		framework.ExpectNoError(err, "While waiting for pod to be recreated")
 		ginkgo.By("Waiting for file system resize to finish")
-		pvc, err = testsuites.WaitForFSResize(pvc, c)
+		pvc, err = testsuites.WaitForFSResize(ctx, pvc, c)
 		framework.ExpectNoError(err, "while waiting for fs resize to finish")
 		pvcConditions := pvc.Status.Conditions
@@ -160,10 +160,10 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 	})
 })
-func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
+func waitForDeploymentToRecreatePod(ctx context.Context, client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
 	var runningPod v1.Pod
 	waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
-		podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
+		podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment)
 		if err != nil {
 			return false, fmt.Errorf("failed to get pods for deployment: %v", err)
 		}
diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go
index feba8c7bf90..ea3cc83c425 100644
--- a/test/e2e/storage/nfs_persistent_volume-disruptive.go
+++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go
@@ -42,18 +42,18 @@ import (
 	admissionapi "k8s.io/pod-security-admission/api"
 )
-type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string)
+type testBody func(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string)
 type disruptiveTest struct {
 	testItStmt string
 	runTest testBody
 }
 // checkForControllerManagerHealthy checks that the controller manager does not crash within "duration"
-func checkForControllerManagerHealthy(duration time.Duration) error {
+func checkForControllerManagerHealthy(ctx context.Context, duration time.Duration) error {
 	var PID string
 	cmd := "pidof kube-controller-manager"
-	for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
-		result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
+	for start := time.Now(); time.Since(start) < duration && ctx.Err() == nil; time.Sleep(5 * time.Second) {
+		result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
 		if err != nil {
 			// We don't necessarily know that it crashed, pipe could just be broken
 			e2essh.LogResult(result)
@@ -91,7 +91,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 		selector *metav1.LabelSelector
 	)
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		// To protect the NFS volume pod from the kubelet restart, we isolate it on its own node.
 		e2eskipper.SkipUnlessNodeCountIsAtLeast(minNodes)
 		e2eskipper.SkipIfProviderIs("local")
@@ -101,7 +101,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 		volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
 		selector = metav1.SetAsLabelSelector(volLabel)
 		// Start the NFS server pod.
-		_, nfsServerPod, nfsServerHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
+		_, nfsServerPod, nfsServerHost = e2evolume.NewNFSServer(ctx, c, ns, []string{"-G", "777", "/exports"})
 		ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, nfsServerPod)
 		nfsPVconfig = e2epv.PersistentVolumeConfig{
 			NamePrefix: "nfs-",
@@ -122,7 +122,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 		// Get the first ready node IP that is not hosting the NFS pod.
 		if clientNodeIP == "" {
 			framework.Logf("Designating test node")
-			nodes, err := e2enode.GetReadySchedulableNodes(c)
+			nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
 			framework.ExpectNoError(err)
 			for _, node := range nodes.Items {
 				if node.Name != nfsServerPod.Spec.NodeName {
@@ -136,10 +136,6 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 		}
 	})
-	ginkgo.AfterEach(func() {
-		e2epod.DeletePodWithWait(c, nfsServerPod)
-	})
-
 	ginkgo.Context("when kube-controller-manager restarts", func() {
 		var (
 			diskName1, diskName2 string
@@ -151,12 +147,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 			clientPod *v1.Pod
 		)
-		ginkgo.BeforeEach(func() {
+		ginkgo.BeforeEach(func(ctx context.Context) {
 			e2eskipper.SkipUnlessProviderIs("gce")
 			e2eskipper.SkipUnlessSSHKeyPresent()
 			ginkgo.By("Initializing first PD with PVPVC binding")
-			pvSource1, diskName1 = createGCEVolume()
+			pvSource1, diskName1 = createGCEVolume(ctx)
 			framework.ExpectNoError(err)
 			pvConfig1 = e2epv.PersistentVolumeConfig{
 				NamePrefix: "gce-",
@@ -164,12 +160,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 				PVSource: *pvSource1,
 				Prebind: nil,
 			}
-			pv1, pvc1, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig1, pvcConfig, ns, false)
+			pv1, pvc1, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig1, pvcConfig, ns, false)
 			framework.ExpectNoError(err)
-			framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv1, pvc1))
+			framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv1, pvc1))
 			ginkgo.By("Initializing second PD with PVPVC binding")
-			pvSource2, diskName2 = createGCEVolume()
+			pvSource2, diskName2 = createGCEVolume(ctx)
 			framework.ExpectNoError(err)
 			pvConfig2 = e2epv.PersistentVolumeConfig{
 				NamePrefix: "gce-",
@@ -177,35 +173,35 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 				PVSource: *pvSource2,
 				Prebind: nil,
 			}
-			pv2, pvc2, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig2, pvcConfig, ns, false)
+			pv2, pvc2, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig2, pvcConfig, ns, false)
 			framework.ExpectNoError(err)
-			framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv2, pvc2))
+			framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv2, pvc2))
 			ginkgo.By("Attaching both PVC's to a single pod")
-			clientPod, err = e2epod.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
+			clientPod, err = e2epod.CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
 			framework.ExpectNoError(err)
 		})
-		ginkgo.AfterEach(func() {
+		ginkgo.AfterEach(func(ctx context.Context) {
 			// Delete client/user pod first
-			framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod))
+			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod))
 			// Delete PV and PVCs
-			if errs := e2epv.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
+			if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv1, pvc1); len(errs) > 0 {
 				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 			}
 			pv1, pvc1 = nil, nil
-			if errs := e2epv.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 {
+			if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv2, pvc2); len(errs) > 0 {
 				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 			}
 			pv2, pvc2 = nil, nil
 			// Delete the actual disks
 			if diskName1 != "" {
-				framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName1))
+				framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, diskName1))
 			}
 			if diskName2 != "" {
-				framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName2))
+				framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, diskName2))
 			}
 		})
@@ -213,20 +209,20 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 			e2eskipper.SkipUnlessSSHKeyPresent()
 			ginkgo.By("Deleting PVC for volume 2")
-			err = e2epv.DeletePersistentVolumeClaim(c, pvc2.Name, ns)
+			err = e2epv.DeletePersistentVolumeClaim(ctx, c, pvc2.Name, ns)
 			framework.ExpectNoError(err)
 			pvc2 = nil
 			ginkgo.By("Restarting the kube-controller-manager")
-			err = e2ekubesystem.RestartControllerManager()
+			err = e2ekubesystem.RestartControllerManager(ctx)
 			framework.ExpectNoError(err)
-			err = e2ekubesystem.WaitForControllerManagerUp()
+			err = e2ekubesystem.WaitForControllerManagerUp(ctx)
 			framework.ExpectNoError(err)
 			framework.Logf("kube-controller-manager restarted")
 			ginkgo.By("Observing the kube-controller-manager healthy for at least 2 minutes")
 			// Continue checking for 2 minutes to make sure kube-controller-manager is healthy
-			err = checkForControllerManagerHealthy(2 * time.Minute)
+			err = checkForControllerManagerHealthy(ctx, 2*time.Minute)
 			framework.ExpectNoError(err)
 		})
@@ -239,14 +235,14 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 			pvc *v1.PersistentVolumeClaim
 		)
-		ginkgo.BeforeEach(func() {
+		ginkgo.BeforeEach(func(ctx context.Context) {
 			framework.Logf("Initializing test spec")
-			clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name)
+			clientPod, pv, pvc = initTestCase(ctx, f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name)
 		})
-		ginkgo.AfterEach(func() {
+		ginkgo.AfterEach(func(ctx context.Context) {
 			framework.Logf("Tearing down test spec")
-			tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */)
+			tearDownTestCase(ctx, c, f, ns, clientPod, pvc, pv, true /* force PV delete */)
 			pv, pvc, clientPod = nil, nil, nil
 		})
@@ -274,7 +270,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 			ginkgo.It(t.testItStmt, func(ctx context.Context) {
 				e2eskipper.SkipUnlessSSHKeyPresent()
 				ginkgo.By("Executing Spec")
-				t.runTest(c, f, clientPod, e2epod.VolumeMountPath1)
+				t.runTest(ctx, c, f, clientPod, e2epod.VolumeMountPath1)
 			})
 		}(test)
 	}
@@ -282,8 +278,8 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 })
 // createGCEVolume creates PersistentVolumeSource for GCEVolume.
-func createGCEVolume() (*v1.PersistentVolumeSource, string) {
-	diskName, err := e2epv.CreatePDWithRetry()
+func createGCEVolume(ctx context.Context) (*v1.PersistentVolumeSource, string) {
+	diskName, err := e2epv.CreatePDWithRetry(ctx)
 	framework.ExpectNoError(err)
 	return &v1.PersistentVolumeSource{
 		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
@@ -296,8 +292,8 @@ func createGCEVolume() (*v1.PersistentVolumeSource, string) {
 // initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
 // by the test.
-func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
-	pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
+func initTestCase(ctx context.Context, f *framework.Framework, c clientset.Interface, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
+	pv, pvc, err := e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false)
 	defer func() {
 		if err != nil {
 			ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, c, pvc.Name, ns)
@@ -308,7 +304,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
 	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
 	pod.Spec.NodeName = nodeName
 	framework.Logf("Creating NFS client pod.")
-	pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+	pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 	framework.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName)
 	framework.ExpectNoError(err)
 	defer func() {
@@ -316,27 +312,27 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
 			ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, pod)
 		}
 	}()
-	err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
+	err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
 	framework.ExpectNoError(err, fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name))
 	// Return created api objects
-	pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+	pod, err = c.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err)
-	pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
+	pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err)
-	pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
+	pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err)
 	return pod, pv, pvc
 }
 // tearDownTestCase destroy resources created by initTestCase.
-func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDeletePV bool) {
+func tearDownTestCase(ctx context.Context, c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDeletePV bool) {
 	// Ignore deletion errors. Failing on them will interrupt test cleanup.
-	e2epod.DeletePodWithWait(c, client)
-	e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns)
+	e2epod.DeletePodWithWait(ctx, c, client)
+	e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns)
 	if forceDeletePV && pv != nil {
-		e2epv.DeletePersistentVolume(c, pv.Name)
+		e2epv.DeletePersistentVolume(ctx, c, pv.Name)
 		return
 	}
-	err := e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, 5*time.Minute)
+	err := e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 5*time.Second, 5*time.Minute)
 	framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", pv.Name)
 }
diff --git a/test/e2e/storage/non_graceful_node_shutdown.go b/test/e2e/storage/non_graceful_node_shutdown.go
index b3fec272400..41e5bfb09fd 100644
--- a/test/e2e/storage/non_graceful_node_shutdown.go
+++ b/test/e2e/storage/non_graceful_node_shutdown.go
@@ -66,11 +66,11 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [
 	f := framework.NewDefaultFramework("non-graceful-shutdown")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		e2eskipper.SkipUnlessProviderIs("gce")
-		nodeList, err := e2enode.GetReadySchedulableNodes(c)
+		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c)
 		if err != nil {
 			framework.Logf("Failed to list node: %v", err)
 		}
@@ -84,24 +84,24 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [
 			// Install gce pd csi driver
 			ginkgo.By("deploying csi gce-pd driver")
 			driver := drivers.InitGcePDCSIDriver()
-			config := driver.PrepareTest(f)
+			config := driver.PrepareTest(ctx, f)
 			dDriver, ok := driver.(storageframework.DynamicPVTestDriver)
 			if !ok {
 				e2eskipper.Skipf("csi driver expected DynamicPVTestDriver but got %v", driver)
 			}
 			ginkgo.By("Creating a gce-pd storage class")
-			sc := dDriver.GetDynamicProvisionStorageClass(config, "")
-			_, err := c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
+			sc := dDriver.GetDynamicProvisionStorageClass(ctx, config, "")
+			_, err := c.StorageV1().StorageClasses().Create(ctx, sc, metav1.CreateOptions{})
 			framework.ExpectNoError(err, "failed to create a storageclass")
 			scName := &sc.Name
 			deploymentName := "sts-pod-gcepd"
 			podLabels := map[string]string{"app": deploymentName}
-			pod := createAndVerifyStatefulDeployment(scName, deploymentName, ns, podLabels, c)
+			pod := createAndVerifyStatefulDeployment(ctx, scName, deploymentName, ns, podLabels, c)
 			oldNodeName := pod.Spec.NodeName
 			ginkgo.By("Stopping the kubelet non gracefully for pod" + pod.Name)
-			utils.KubeletCommand(utils.KStop, c, pod)
+			utils.KubeletCommand(ctx, utils.KStop, c, pod)
 			ginkgo.By("Adding out of service taint on node " + oldNodeName)
 			// taint this node as out-of-service node
@@ -109,7 +109,7 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [
 				Key: v1.TaintNodeOutOfService,
 				Effect: v1.TaintEffectNoExecute,
 			}
-			e2enode.AddOrUpdateTaintOnNode(c, oldNodeName, taint)
+			e2enode.AddOrUpdateTaintOnNode(ctx, c, oldNodeName, taint)
 			ginkgo.By(fmt.Sprintf("Checking if the pod %s got rescheduled to a new node", pod.Name))
 			labelSelectorStr := labels.SelectorFromSet(podLabels).String()
@@ -117,18 +117,18 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [
 				LabelSelector: labelSelectorStr,
 				FieldSelector: fields.OneTermNotEqualSelector("spec.nodeName", oldNodeName).String(),
 			}
-			_, err = e2epod.WaitForAllPodsCondition(c, ns, podListOpts, 1, "running and ready", framework.PodStartTimeout, testutils.PodRunningReady)
+			_, err = e2epod.WaitForAllPodsCondition(ctx, c, ns, podListOpts, 1, "running and ready", framework.PodStartTimeout, testutils.PodRunningReady)
 			framework.ExpectNoError(err)
 			// Bring the node back online and remove the taint
-			utils.KubeletCommand(utils.KStart, c, pod)
-			e2enode.RemoveTaintOffNode(c, oldNodeName, taint)
+			utils.KubeletCommand(ctx, utils.KStart, c, pod)
+			e2enode.RemoveTaintOffNode(ctx, c, oldNodeName, taint)
 			// Verify that a pod gets scheduled to the older node that was terminated non gracefully and now
 			// is back online
 			newDeploymentName := "sts-pod-gcepd-new"
 			newPodLabels := map[string]string{"app": newDeploymentName}
-			createAndVerifyStatefulDeployment(scName, newDeploymentName, ns, newPodLabels, c)
+			createAndVerifyStatefulDeployment(ctx, scName, newDeploymentName, ns, newPodLabels, c)
 		})
 	})
 })
@@ -137,23 +137,23 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [
 // i) a pvc using the provided storage class
 // ii) creates a deployment with replica count 1 using the created pvc
 // iii) finally verifies if the pod is running and ready and returns the pod object
-func createAndVerifyStatefulDeployment(scName *string, name, ns string, podLabels map[string]string,
+func createAndVerifyStatefulDeployment(ctx context.Context, scName *string, name, ns string, podLabels map[string]string,
 	c clientset.Interface) *v1.Pod {
 	ginkgo.By("Creating a pvc using the storage class " + *scName)
 	pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 		StorageClassName: scName,
 	}, ns)
-	gotPVC, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
+	gotPVC, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "failed to create a persistent volume claim")
 	ginkgo.By("Creating a deployment using the pvc " + pvc.Name)
 	dep := makeDeployment(ns, name, gotPVC.Name, podLabels)
-	_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), dep, metav1.CreateOptions{})
+	_, err = c.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "failed to created the deployment")
 	ginkgo.By(fmt.Sprintf("Ensuring that the pod of deployment %s is running and ready", dep.Name))
 	labelSelector := labels.SelectorFromSet(labels.Set(podLabels))
-	podList, err := e2epod.WaitForPodsWithLabelRunningReady(c, ns, labelSelector, 1, framework.PodStartTimeout)
+	podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, c, ns, labelSelector, 1, framework.PodStartTimeout)
 	framework.ExpectNoError(err)
 	pod := &podList.Items[0]
 	return pod
diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go
index 78d8a036c1b..da2ab5fc6a6 100644
--- a/test/e2e/storage/pd.go
+++ b/test/e2e/storage/pd.go
@@ -73,17 +73,17 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 	f := framework.NewDefaultFramework("pod-disks")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessNodeCountIsAtLeast(minNodes)
 		cs = f.ClientSet
 		ns = f.Namespace.Name
-		e2eskipper.SkipIfMultizone(cs)
+		e2eskipper.SkipIfMultizone(ctx, cs)
 		podClient = cs.CoreV1().Pods(ns)
 		nodeClient = cs.CoreV1().Nodes()
 		var err error
-		nodes, err = e2enode.GetReadySchedulableNodes(cs)
+		nodes, err = e2enode.GetReadySchedulableNodes(ctx, cs)
 		framework.ExpectNoError(err)
 		if len(nodes.Items) < minNodes {
 			e2eskipper.Skipf("The test requires %d schedulable nodes, got only %d", minNodes, len(nodes.Items))
@@ -141,7 +141,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 			}
 			ginkgo.By("creating PD")
-			diskName, err := e2epv.CreatePDWithRetry()
+			diskName, err := e2epv.CreatePDWithRetry(ctx)
 			framework.ExpectNoError(err, "Error creating PD")
 			var fmtPod *v1.Pod
@@ -149,12 +149,12 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				// if all test pods are RO then need a RW pod to format pd
 				ginkgo.By("creating RW fmt Pod to ensure PD is formatted")
 				fmtPod = testPDPod([]string{diskName}, host0Name, false, 1)
-				_, err = podClient.Create(context.TODO(), fmtPod, metav1.CreateOptions{})
+				_, err = podClient.Create(ctx, fmtPod, metav1.CreateOptions{})
 				framework.ExpectNoError(err, "Failed to create fmtPod")
-				framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, fmtPod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
+				framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, fmtPod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 				ginkgo.By("deleting the fmtPod")
-				framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
+				framework.ExpectNoError(podClient.Delete(ctx, fmtPod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
 				framework.Logf("deleted fmtPod %q", fmtPod.Name)
 				ginkgo.By("waiting for PD to detach")
 				framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
@@ -169,17 +169,17 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				ginkgo.By("defer: cleaning up PD-RW test environment")
 				framework.Logf("defer cleanup errors can usually be ignored")
 				if fmtPod != nil {
-					podClient.Delete(context.TODO(), fmtPod.Name, podDelOpt)
+					podClient.Delete(ctx, fmtPod.Name, podDelOpt)
 				}
-				podClient.Delete(context.TODO(), host0Pod.Name, podDelOpt)
-				podClient.Delete(context.TODO(), host1Pod.Name, podDelOpt)
-				detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
+				podClient.Delete(ctx, host0Pod.Name, podDelOpt)
+				podClient.Delete(ctx, host1Pod.Name, podDelOpt)
+				detachAndDeletePDs(ctx, diskName, []types.NodeName{host0Name, host1Name})
 			}()
 			ginkgo.By("creating host0Pod on node0")
-			_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
+			_, err = podClient.Create(ctx, host0Pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
-			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
+			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 			framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)
 			var containerName, testFile, testFileContents string
@@ -192,35 +192,35 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
 				framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
 				ginkgo.By("verifying PD is present in node0's VolumeInUse list")
-				framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
+				framework.ExpectNoError(waitForPDInVolumesInUse(ctx, nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
 				ginkgo.By("deleting host0Pod") // delete this pod before creating next pod
-				framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
+				framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
 				framework.Logf("deleted host0Pod %q", host0Pod.Name)
-				e2epod.WaitForPodToDisappear(cs, host0Pod.Namespace, host0Pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
+				e2epod.WaitForPodToDisappear(ctx, cs, host0Pod.Namespace, host0Pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
 				framework.Logf("deleted host0Pod %q disappeared", host0Pod.Name)
 			}
 			ginkgo.By("creating host1Pod on node1")
-			_, err = podClient.Create(context.TODO(), host1Pod, metav1.CreateOptions{})
+			_, err = podClient.Create(ctx, host1Pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err, "Failed to create host1Pod")
-			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host1Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
+			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, host1Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 			framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)
 			if readOnly {
 				ginkgo.By("deleting host0Pod")
-				framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
+				framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
 				framework.Logf("deleted host0Pod %q", host0Pod.Name)
 			} else {
 				ginkgo.By("verifying PD contents in host1Pod")
 				verifyPDContentsViaContainer(ns, f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents})
 				framework.Logf("verified PD contents in pod %q", host1Pod.Name)
 				ginkgo.By("verifying PD is removed from node0")
-				framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
+				framework.ExpectNoError(waitForPDInVolumesInUse(ctx, nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
 				framework.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name)
 			}
 			ginkgo.By("deleting host1Pod")
-			framework.ExpectNoError(podClient.Delete(context.TODO(), host1Pod.Name, podDelOpt), "Failed to delete host1Pod")
+			framework.ExpectNoError(podClient.Delete(ctx, host1Pod.Name, podDelOpt), "Failed to delete host1Pod")
 			framework.Logf("deleted host1Pod %q", host1Pod.Name)
 			ginkgo.By("Test completed successfully, waiting for PD to detach from both nodes")
@@ -263,7 +263,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 			ginkgo.By(fmt.Sprintf("creating %d PD(s)", numPDs))
 			for i := 0; i < numPDs; i++ {
-				name, err := e2epv.CreatePDWithRetry()
+				name, err := e2epv.CreatePDWithRetry(ctx)
 				framework.ExpectNoError(err, fmt.Sprintf("Error creating PD %d", i))
 				diskNames = append(diskNames, name)
 			}
@@ -273,10 +273,10 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				ginkgo.By("defer: cleaning up PD-RW test environment")
 				framework.Logf("defer cleanup errors can usually be ignored")
 				if host0Pod != nil {
-					podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0))
+					podClient.Delete(ctx, host0Pod.Name, *metav1.NewDeleteOptions(0))
 				}
 				for _, diskName := range diskNames {
-					detachAndDeletePDs(diskName, []types.NodeName{host0Name})
+					detachAndDeletePDs(ctx, diskName, []types.NodeName{host0Name})
 				}
 			}()
@@ -284,9 +284,9 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				framework.Logf("PD Read/Writer Iteration #%v", i)
 				ginkgo.By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers))
 				host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers)
-				_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
+				_, err = podClient.Create(ctx, host0Pod, metav1.CreateOptions{})
 				framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
-				framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
+				framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 				ginkgo.By(fmt.Sprintf("writing %d file(s) via a container", numPDs))
 				containerName := "mycontainer"
@@ -309,7 +309,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				verifyPDContentsViaContainer(ns, f, host0Pod.Name, containerName, fileAndContentToVerify)
 				ginkgo.By("deleting host0Pod")
-				framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
+				framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
 			}
 			ginkgo.By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs))
 			for _, diskName := range diskNames {
@@ -353,7 +353,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 			origNodeCnt := len(nodes.Items) // healhy nodes running kubelet
 			ginkgo.By("creating a pd")
-			diskName, err := e2epv.CreatePDWithRetry()
+			diskName, err := e2epv.CreatePDWithRetry(ctx)
 			framework.ExpectNoError(err, "Error creating a pd")
 			targetNode := &nodes.Items[0] // for node delete ops
@@ -364,19 +364,19 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				ginkgo.By("defer: cleaning up PD-RW test env")
 				framework.Logf("defer cleanup errors can usually be ignored")
 				ginkgo.By("defer: delete host0Pod")
-				podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0))
+				podClient.Delete(ctx, host0Pod.Name, *metav1.NewDeleteOptions(0))
 				ginkgo.By("defer: detach and delete PDs")
-				detachAndDeletePDs(diskName, []types.NodeName{host0Name})
+				detachAndDeletePDs(ctx, diskName, []types.NodeName{host0Name})
 				if disruptOp == deleteNode || disruptOp == deleteNodeObj {
 					if disruptOp == deleteNodeObj {
 						targetNode.ObjectMeta.SetResourceVersion("0") // need to set the resource version or else the Create() fails
 						ginkgo.By("defer: re-create host0 node object")
-						_, err := nodeClient.Create(context.TODO(), targetNode, metav1.CreateOptions{})
+						_, err := nodeClient.Create(ctx, targetNode, metav1.CreateOptions{})
 						framework.ExpectNoError(err, fmt.Sprintf("defer: Unable to re-create the deleted node object %q", targetNode.Name))
 					}
 					ginkgo.By("defer: verify the number of ready nodes")
-					numNodes := countReadyNodes(cs, host0Name)
+					numNodes := countReadyNodes(ctx, cs, host0Name)
 					// if this defer is reached due to an Expect then nested
 					// Expects are lost, so use Failf here
 					if numNodes != origNodeCnt {
@@ -386,10 +386,10 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 			})
 			ginkgo.By("creating host0Pod on node0")
-			_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
+			_, err = podClient.Create(ctx, host0Pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
 			ginkgo.By("waiting for host0Pod to be running")
-			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
+			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 			ginkgo.By("writing content to host0Pod")
 			testFile := "/testpd1/tracker"
@@ -399,7 +399,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 			framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
 			ginkgo.By("verifying PD is present in node0's VolumeInUse list")
-			framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/))
+			framework.ExpectNoError(waitForPDInVolumesInUse(ctx, nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/))
 			if disruptOp == deleteNode {
 				ginkgo.By("getting gce instances")
@@ -413,7 +413,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name))
 				framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0Pod: err=%v", err))
 				ginkgo.By("expecting host0 node to be re-created")
-				numNodes := countReadyNodes(cs, host0Name)
+				numNodes := countReadyNodes(ctx, cs, host0Name)
 				framework.ExpectEqual(numNodes, origNodeCnt, fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt))
 				output, err = gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
 				framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
@@ -421,9 +421,9 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 			} else if disruptOp == deleteNodeObj {
 				ginkgo.By("deleting host0's node api object")
-				framework.ExpectNoError(nodeClient.Delete(context.TODO(), string(host0Name), *metav1.NewDeleteOptions(0)), "Unable to delete host0's node object")
+				framework.ExpectNoError(nodeClient.Delete(ctx, string(host0Name), *metav1.NewDeleteOptions(0)), "Unable to delete host0's node object")
 				ginkgo.By("deleting host0Pod")
-				framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")
+				framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, *metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")
 			} else if disruptOp == evictPod {
 				evictTarget := &policyv1.Eviction{
@@ -434,7 +434,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				}
 				ginkgo.By("evicting host0Pod")
 				err = wait.PollImmediate(framework.Poll, podEvictTimeout, func() (bool, error) {
-					if err := cs.CoreV1().Pods(ns).EvictV1(context.TODO(), evictTarget); err != nil {
+					if err := cs.CoreV1().Pods(ns).EvictV1(ctx, evictTarget); err != nil {
 						framework.Logf("Failed to evict host0Pod, ignoring error: %v", err)
 						return false, nil
 					}
@@ -453,7 +453,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 		e2eskipper.SkipUnlessProviderIs("gce")
 		ginkgo.By("delete a PD")
-		framework.ExpectNoError(e2epv.DeletePDWithRetry("non-exist"))
+		framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, "non-exist"))
 	})
 	// This test is marked to run as serial so as device selection on AWS does not
@@ -461,7 +461,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 	ginkgo.It("[Serial] attach on previously attached volumes should work", func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
 		ginkgo.By("creating PD")
-		diskName, err := e2epv.CreatePDWithRetry()
+		diskName, err := e2epv.CreatePDWithRetry(ctx)
 		framework.ExpectNoError(err, "Error creating PD")
 		// this should be safe to do because if attach fails then detach will be considered
@@ -474,22 +474,22 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 		pod := testPDPod([]string{diskName}, host0Name /*readOnly*/, false, 1)
 		ginkgo.By("Creating test pod with same volume")
-		_, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
+		_, err = podClient.Create(ctx, pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Failed to create pod")
-		framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
+		framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 		ginkgo.By("deleting the pod")
-		framework.ExpectNoError(podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete pod")
+		framework.ExpectNoError(podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete pod")
 		framework.Logf("deleted pod %q", pod.Name)
 		ginkgo.By("waiting for PD to detach")
 		framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
 	})
 })
-func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
-	e2enode.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
-	e2enode.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
-	nodes, err := e2enode.GetReadySchedulableNodes(c)
+func countReadyNodes(ctx context.Context, c clientset.Interface, hostName types.NodeName) int {
+	e2enode.WaitForNodeToBeReady(ctx, c, string(hostName), nodeStatusTimeout)
+	e2enode.WaitForAllNodesSchedulable(ctx, c, nodeStatusTimeout)
+	nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
 	framework.ExpectNoError(err)
 	return len(nodes.Items)
 }
@@ -672,7 +672,7 @@ func waitForPDDetach(diskName string, nodeName types.NodeName) error {
 	return nil
 }
-func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
+func detachAndDeletePDs(ctx context.Context, diskName string, hosts []types.NodeName) {
 	for _, host := range hosts {
 		framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
 		detachPD(host, diskName)
@@ -680,10 +680,11 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
 		waitForPDDetach(diskName, host)
 	}
 	ginkgo.By(fmt.Sprintf("Deleting PD %q", diskName))
-	framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName))
+	framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, diskName))
 }
 func waitForPDInVolumesInUse(
+	ctx context.Context,
 	nodeClient v1core.NodeInterface,
 	diskName string,
 	nodeName types.NodeName,
@@ -695,7 +696,7 @@ func waitForPDInVolumesInUse(
 	}
 	framework.Logf("Waiting for node %s's VolumesInUse Status %s PD %q", nodeName, logStr, diskName)
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
-		nodeObj, err := nodeClient.Get(context.TODO(), string(nodeName), metav1.GetOptions{})
+		nodeObj, err := nodeClient.Get(ctx, string(nodeName), metav1.GetOptions{})
 		if err != nil || nodeObj == nil {
 			framework.Logf("Failed to fetch node object %q from API server. err=%v", nodeName, err)
 			continue
diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go
index f0f8ed6c1ed..8af5609c107 100644
--- a/test/e2e/storage/persistent_volumes-gce.go
+++ b/test/e2e/storage/persistent_volumes-gce.go
@@ -45,14 +45,14 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
 }
 // initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
-func initializeGCETestSpec(c clientset.Interface, t *framework.TimeoutContext, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
+func initializeGCETestSpec(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
 	ginkgo.By("Creating the PV and PVC")
-	pv, pvc, err := e2epv.CreatePVPVC(c, t, pvConfig, pvcConfig, ns, isPrebound)
+	pv, pvc, err := e2epv.CreatePVPVC(ctx, c, t, pvConfig, pvcConfig, ns, isPrebound)
 	framework.ExpectNoError(err)
-	framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, t, ns, pv, pvc))
+	framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, t, ns, pv, pvc))
 	ginkgo.By("Creating the Client Pod")
-	clientPod, err := e2epod.CreateClientPod(c, ns, pvc)
+	clientPod, err := e2epod.CreateClientPod(ctx, c, ns, pvc)
 	framework.ExpectNoError(err)
 	return clientPod, pv, pvc
 }
@@ -76,7 +76,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f
 	f := framework.NewDefaultFramework("pv")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		c = f.ClientSet
 		ns = f.Namespace.Name
@@ -86,7 +86,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f
 		e2eskipper.SkipUnlessProviderIs("gce", "gke")
 		ginkgo.By("Initializing Test Spec")
-		diskName, err = e2epv.CreatePDWithRetry()
+		diskName, err = e2epv.CreatePDWithRetry(ctx)
 		framework.ExpectNoError(err)
 		pvConfig = e2epv.PersistentVolumeConfig{
@@ -106,20 +106,20 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f
 			Selector: selector,
 			StorageClassName: &emptyStorageClass,
 		}
-		clientPod, pv, pvc = initializeGCETestSpec(c, f.Timeouts, ns, pvConfig, pvcConfig, false)
+		clientPod, pv, pvc = initializeGCETestSpec(ctx, c, f.Timeouts, ns, pvConfig, pvcConfig, false)
 		node = types.NodeName(clientPod.Spec.NodeName)
 	})
-	ginkgo.AfterEach(func() {
+	ginkgo.AfterEach(func(ctx context.Context) {
 		framework.Logf("AfterEach: Cleaning up test resources")
 		if c != nil {
-			framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod))
-			if errs := e2epv.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
+			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod))
+			if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv, pvc); len(errs) > 0 {
 				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 			}
 			clientPod, pv, pvc, node = nil, nil, nil, ""
 			if diskName != "" {
-				framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName))
+				framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, diskName))
 			}
 		}
 	})
@@ -129,11 +129,11 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f
 	ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func(ctx context.Context) {
 		ginkgo.By("Deleting the Claim")
-		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
+		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
 		framework.ExpectEqual(verifyGCEDiskAttached(diskName, node), true)
 		ginkgo.By("Deleting the Pod")
-		framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name)
+		framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "Failed to delete pod ", clientPod.Name)
 		ginkgo.By("Verifying Persistent Disk detach")
 		framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
@@ -144,11 +144,11 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f
 	ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func(ctx context.Context) {
 		ginkgo.By("Deleting the Persistent Volume")
-		framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
+		framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name)
 		framework.ExpectEqual(verifyGCEDiskAttached(diskName, node), true)
 		ginkgo.By("Deleting the client pod")
-		framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name)
+		framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "Failed to delete pod ", clientPod.Name)
 		ginkgo.By("Verifying Persistent Disk detaches")
 		framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
@@ -158,12 +158,12 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f
 	ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func(ctx context.Context) {
 		ginkgo.By("Deleting the Namespace")
-		err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{})
+		err := c.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{})
 		framework.ExpectNoError(err)
 		// issue deletes for the client pod and claim, accelerating namespace controller actions
-		e2epod.DeletePodOrFail(c, clientPod.Namespace, clientPod.Name)
-		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
+		e2epod.DeletePodOrFail(ctx, c, clientPod.Namespace, clientPod.Name)
+		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
 		ginkgo.By("Verifying Persistent Disk detaches")
 		framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go
index 7b2c0ee0a21..a0d78771d00 100644
--- a/test/e2e/storage/persistent_volumes-local.go
+++ b/test/e2e/storage/persistent_volumes-local.go
@@ -158,8 +158,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
scName string ) - ginkgo.BeforeEach(func() { - nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes) + ginkgo.BeforeEach(func(ctx context.Context) { + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, maxNodes) framework.ExpectNoError(err) scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name) @@ -195,18 +195,18 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.Context(ctxString, func() { var testVol *localTestVolume - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { if testVolType == GCELocalSSDVolumeType { - SkipUnlessLocalSSDExists(config, "scsi", "fs", config.randomNode) + SkipUnlessLocalSSDExists(ctx, config, "scsi", "fs", config.randomNode) } - setupStorageClass(config, &testMode) - testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.randomNode, 1, testMode) + setupStorageClass(ctx, config, &testMode) + testVols := setupLocalVolumesPVCsPVs(ctx, config, testVolType, config.randomNode, 1, testMode) testVol = testVols[0] }) - ginkgo.AfterEach(func() { - cleanupLocalVolumes(config, []*localTestVolume{testVol}) - cleanupStorageClass(config) + ginkgo.AfterEach(func(ctx context.Context) { + cleanupLocalVolumes(ctx, config, []*localTestVolume{testVol}) + cleanupStorageClass(ctx, config) }) ginkgo.Context("One pod requesting one prebound PVC", func() { @@ -215,11 +215,11 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { pod1Err error ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Creating pod1") - pod1, pod1Err = createLocalPod(config, testVol, nil) + pod1, pod1Err = createLocalPod(ctx, config, testVol, nil) framework.ExpectNoError(pod1Err) - verifyLocalPod(config, testVol, pod1, config.randomNode.Name) + verifyLocalPod(ctx, config, testVol, pod1, config.randomNode.Name) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) @@ -227,9 +227,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { podRWCmdExec(f, pod1, writeCmd) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Deleting pod1") - e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod1.Name) }) ginkgo.It("should be able to mount volume and read from pod1", func(ctx context.Context) { @@ -250,13 +250,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.Context("Two pods mounting a local volume at the same time", func() { ginkgo.It("should be able to write from pod1 and read from pod2", func(ctx context.Context) { - twoPodsReadWriteTest(f, config, testVol) + twoPodsReadWriteTest(ctx, f, config, testVol) }) }) ginkgo.Context("Two pods mounting a local volume one after the other", func() { ginkgo.It("should be able to write from pod1 and read from pod2", func(ctx context.Context) { - twoPodsReadWriteSerialTest(f, config, testVol) + twoPodsReadWriteSerialTest(ctx, f, config, testVol) }) }) @@ -269,35 +269,35 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.It("should set fsGroup for one pod [Slow]", func(ctx context.Context) { ginkgo.By("Checking fsGroup is set") - pod := createPodWithFsGroupTest(config, testVol, 1234, 1234) + pod := createPodWithFsGroupTest(ctx, config, testVol, 1234, 1234) ginkgo.By("Deleting pod") - e2epod.DeletePodOrFail(config.client, config.ns, pod.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod.Name) }) ginkgo.It("should set same 
fsGroup for two pods simultaneously [Slow]", func(ctx context.Context) { fsGroup := int64(1234) ginkgo.By("Create first pod and check fsGroup is set") - pod1 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) + pod1 := createPodWithFsGroupTest(ctx, config, testVol, fsGroup, fsGroup) ginkgo.By("Create second pod with same fsGroup and check fsGroup is correct") - pod2 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) + pod2 := createPodWithFsGroupTest(ctx, config, testVol, fsGroup, fsGroup) ginkgo.By("Deleting first pod") - e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod1.Name) ginkgo.By("Deleting second pod") - e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod2.Name) }) ginkgo.It("should set different fsGroup for second pod if first pod is deleted [Flaky]", func(ctx context.Context) { // TODO: Disabled temporarily, remove [Flaky] tag after #73168 is fixed. fsGroup1, fsGroup2 := int64(1234), int64(4321) ginkgo.By("Create first pod and check fsGroup is set") - pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1) + pod1 := createPodWithFsGroupTest(ctx, config, testVol, fsGroup1, fsGroup1) ginkgo.By("Deleting first pod") - err := e2epod.DeletePodWithWait(config.client, pod1) + err := e2epod.DeletePodWithWait(ctx, config.client, pod1) framework.ExpectNoError(err, "while deleting first pod") ginkgo.By("Create second pod and check fsGroup is the new one") - pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2) + pod2 := createPodWithFsGroupTest(ctx, config, testVol, fsGroup2, fsGroup2) ginkgo.By("Deleting second pod") - e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod2.Name) }) }) @@ -316,12 +316,12 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { localVolumeType: DirectoryLocalVolumeType, } ginkgo.By("Creating local PVC and PV") - createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode) - pod, err := createLocalPod(config, testVol, nil) + createLocalPVCsPVs(ctx, config, []*localTestVolume{testVol}, immediateMode) + pod, err := createLocalPod(ctx, config, testVol, nil) framework.ExpectError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, config.client, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectError(err) - cleanupLocalPVCsPVs(config, []*localTestVolume{testVol}) + cleanupLocalPVCsPVs(ctx, config, []*localTestVolume{testVol}) }) ginkgo.It("should fail due to wrong node", func(ctx context.Context) { @@ -329,7 +329,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { e2eskipper.Skipf("Runs only when number of nodes >= 2") } - testVols := setupLocalVolumesPVCsPVs(config, DirectoryLocalVolumeType, config.randomNode, 1, immediateMode) + testVols := setupLocalVolumesPVCsPVs(ctx, config, DirectoryLocalVolumeType, config.randomNode, 1, immediateMode) testVol := testVols[0] conflictNodeName := config.nodes[0].Name @@ -337,13 +337,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { conflictNodeName = config.nodes[1].Name } pod := makeLocalPodWithNodeName(config, testVol, conflictNodeName) - pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := 
config.client.CoreV1().Pods(config.ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, config.client, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectError(err) - cleanupLocalVolumes(config, []*localTestVolume{testVol}) + cleanupLocalVolumes(ctx, config, []*localTestVolume{testVol}) }) }) @@ -354,14 +354,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { conflictNodeName string ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { if len(config.nodes) < 2 { e2eskipper.Skipf("Runs only when number of nodes >= 2") } volumeType = DirectoryLocalVolumeType - setupStorageClass(config, &immediateMode) - testVols := setupLocalVolumesPVCsPVs(config, volumeType, config.randomNode, 1, immediateMode) + setupStorageClass(ctx, config, &immediateMode) + testVols := setupLocalVolumesPVCsPVs(ctx, config, volumeType, config.randomNode, 1, immediateMode) conflictNodeName = config.nodes[0].Name if conflictNodeName == config.randomNode.Name { conflictNodeName = config.nodes[1].Name @@ -370,17 +370,17 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { testVol = testVols[0] }) - ginkgo.AfterEach(func() { - cleanupLocalVolumes(config, []*localTestVolume{testVol}) - cleanupStorageClass(config) + ginkgo.AfterEach(func(ctx context.Context) { + cleanupLocalVolumes(ctx, config, []*localTestVolume{testVol}) + cleanupStorageClass(ctx, config) }) ginkgo.It("should fail scheduling due to different NodeAffinity", func(ctx context.Context) { - testPodWithNodeConflict(config, testVol, conflictNodeName, makeLocalPodWithNodeAffinity) + testPodWithNodeConflict(ctx, config, testVol, conflictNodeName, makeLocalPodWithNodeAffinity) }) ginkgo.It("should fail scheduling due to different NodeSelector", func(ctx context.Context) { - testPodWithNodeConflict(config, testVol, conflictNodeName, makeLocalPodWithNodeSelector) + testPodWithNodeConflict(ctx, config, testVol, conflictNodeName, makeLocalPodWithNodeSelector) }) }) @@ -391,23 +391,23 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { volsPerNode = 6 ) - ginkgo.BeforeEach(func() { - setupStorageClass(config, &waitMode) + ginkgo.BeforeEach(func(ctx context.Context) { + setupStorageClass(ctx, config, &waitMode) testVols = map[string][]*localTestVolume{} for i, node := range config.nodes { // The PVCs created here won't be used ginkgo.By(fmt.Sprintf("Setting up local volumes on node %q", node.Name)) - vols := setupLocalVolumesPVCsPVs(config, DirectoryLocalVolumeType, &config.nodes[i], volsPerNode, waitMode) + vols := setupLocalVolumesPVCsPVs(ctx, config, DirectoryLocalVolumeType, &config.nodes[i], volsPerNode, waitMode) testVols[node.Name] = vols } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { for _, vols := range testVols { - cleanupLocalVolumes(config, vols) + cleanupLocalVolumes(ctx, config, vols) } - cleanupStorageClass(config) + cleanupStorageClass(ctx, config) }) ginkgo.It("should use volumes spread across nodes when pod has anti-affinity", func(ctx context.Context) { @@ -415,14 +415,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { e2eskipper.Skipf("Runs only when number of nodes >= %v", ssReplicas) } ginkgo.By("Creating a StatefulSet with pod anti-affinity on nodes") - ss := createStatefulSet(config, ssReplicas, volsPerNode, true, false) - 
validateStatefulSet(config, ss, true) + ss := createStatefulSet(ctx, config, ssReplicas, volsPerNode, true, false) + validateStatefulSet(ctx, config, ss, true) }) ginkgo.It("should use volumes on one node when pod has affinity", func(ctx context.Context) { ginkgo.By("Creating a StatefulSet with pod affinity on nodes") - ss := createStatefulSet(config, ssReplicas, volsPerNode/ssReplicas, false, false) - validateStatefulSet(config, ss, false) + ss := createStatefulSet(ctx, config, ssReplicas, volsPerNode/ssReplicas, false, false) + validateStatefulSet(ctx, config, ss, false) }) ginkgo.It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func(ctx context.Context) { @@ -430,14 +430,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { e2eskipper.Skipf("Runs only when number of nodes >= %v", ssReplicas) } ginkgo.By("Creating a StatefulSet with pod anti-affinity on nodes") - ss := createStatefulSet(config, ssReplicas, 1, true, true) - validateStatefulSet(config, ss, true) + ss := createStatefulSet(ctx, config, ssReplicas, 1, true, true) + validateStatefulSet(ctx, config, ss, true) }) ginkgo.It("should use volumes on one node when pod management is parallel and pod has affinity", func(ctx context.Context) { ginkgo.By("Creating a StatefulSet with pod affinity on nodes") - ss := createStatefulSet(config, ssReplicas, 1, false, true) - validateStatefulSet(config, ss, false) + ss := createStatefulSet(ctx, config, ssReplicas, 1, false, true) + validateStatefulSet(ctx, config, ss, false) }) }) @@ -455,18 +455,18 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { podsFactor = 4 ) - ginkgo.BeforeEach(func() { - setupStorageClass(config, &waitMode) + ginkgo.BeforeEach(func(ctx context.Context) { + setupStorageClass(ctx, config, &waitMode) for i, node := range config.nodes { ginkgo.By(fmt.Sprintf("Setting up %d local volumes on node %q", volsPerNode, node.Name)) - allLocalVolumes[node.Name] = setupLocalVolumes(config, volType, &config.nodes[i], volsPerNode) + allLocalVolumes[node.Name] = setupLocalVolumes(ctx, config, volType, &config.nodes[i], volsPerNode) } ginkgo.By(fmt.Sprintf("Create %d PVs", volsPerNode*len(config.nodes))) var err error for _, localVolumes := range allLocalVolumes { for _, localVolume := range localVolumes { pvConfig := makeLocalPVConfig(config, localVolume) - localVolume.pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) + localVolume.pv, err = e2epv.CreatePV(ctx, config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) framework.ExpectNoError(err) } } @@ -475,7 +475,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { go func() { defer ginkgo.GinkgoRecover() defer wg.Done() - w, err := config.client.CoreV1().PersistentVolumes().Watch(context.TODO(), metav1.ListOptions{}) + w, err := config.client.CoreV1().PersistentVolumes().Watch(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) if w == nil { return @@ -494,7 +494,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { if pv.Status.Phase == v1.VolumeBound || pv.Status.Phase == v1.VolumeAvailable { continue } - pv, err = config.client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + pv, err = config.client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { continue } @@ -505,10 +505,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { if localVolume.pv.Name != pv.Name { continue } 
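The recycle goroutine in this hunk is representative of the change applied throughout the patch: every client-go call receives the ctx supplied by Ginkgo instead of context.TODO(), so an aborted spec cancels the in-flight request rather than letting it run to completion. A minimal sketch of that conversion, assuming the surrounding file's imports (context, metav1, v1, clientset) and with illustrative names (recycleLocalPV, fresh) that are not part of the patch:

// recycleLocalPV deletes a released PV and re-creates it, both under the
// test's context so either call is abandoned as soon as ctx is cancelled.
func recycleLocalPV(ctx context.Context, c clientset.Interface, oldName string, fresh *v1.PersistentVolume) (*v1.PersistentVolume, error) {
	// Delete the released PV; a cancelled ctx aborts the request.
	if err := c.CoreV1().PersistentVolumes().Delete(ctx, oldName, metav1.DeleteOptions{}); err != nil {
		return nil, err
	}
	// Re-create it from a freshly built spec, again under the test's context.
	return c.CoreV1().PersistentVolumes().Create(ctx, fresh, metav1.CreateOptions{})
}

The caller would build fresh the way the test does, for example via e2epv.MakePersistentVolume, so the object carries no stale resourceVersion.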
- err = config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}) + err = config.client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) pvConfig := makeLocalPVConfig(config, localVolume) - localVolume.pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) + localVolume.pv, err = e2epv.CreatePV(ctx, config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) framework.ExpectNoError(err) } } @@ -519,16 +519,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }() }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Stop and wait for recycle goroutine to finish") close(stopCh) wg.Wait() ginkgo.By("Clean all PVs") for nodeName, localVolumes := range allLocalVolumes { ginkgo.By(fmt.Sprintf("Cleaning up %d local volumes on node %q", len(localVolumes), nodeName)) - cleanupLocalVolumes(config, localVolumes) + cleanupLocalVolumes(ctx, config, localVolumes) } - cleanupStorageClass(config) + cleanupStorageClass(ctx, config) }) ginkgo.It("should be able to process many pods and reuse local volumes", func(ctx context.Context) { @@ -565,7 +565,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { pvcs := []*v1.PersistentVolumeClaim{} for j := 0; j < volsPerPod; j++ { pvc := e2epv.MakePersistentVolumeClaim(makeLocalPVCConfig(config, volType), config.ns) - pvc, err := e2epv.CreatePVC(config.client, config.ns, pvc) + pvc, err := e2epv.CreatePVC(ctx, config.client, config.ns, pvc) framework.ExpectNoError(err) pvcs = append(pvcs, pvc) } @@ -577,7 +577,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { } pod, err := e2epod.MakeSecPod(&podConfig) framework.ExpectNoError(err) - pod, err = config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = config.client.CoreV1().Pods(config.ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) pods[pod.Name] = pod numCreated++ @@ -590,7 +590,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { defer podsLock.Unlock() for _, pod := range pods { - if err := deletePodAndPVCs(config, pod); err != nil { + if err := deletePodAndPVCs(ctx, config, pod); err != nil { framework.Logf("Deleting pod %v failed: %v", pod.Name, err) } } @@ -598,8 +598,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.By("Waiting for all pods to complete successfully") const completeTimeout = 5 * time.Minute - waitErr := wait.PollImmediate(time.Second, completeTimeout, func() (done bool, err error) { - podsList, err := config.client.CoreV1().Pods(config.ns).List(context.TODO(), metav1.ListOptions{}) + waitErr := wait.PollImmediateWithContext(ctx, time.Second, completeTimeout, func(ctx context.Context) (done bool, err error) { + podsList, err := config.client.CoreV1().Pods(config.ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -610,7 +610,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { for _, pod := range podsList.Items { if pod.Status.Phase == v1.PodSucceeded { // Delete pod and its PVCs - if err := deletePodAndPVCs(config, &pod); err != nil { + if err := deletePodAndPVCs(ctx, config, &pod); err != nil { return false, err } delete(pods, pod.Name) @@ -630,7 +630,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { pv *v1.PersistentVolume ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { localVolume 
:= &localTestVolume{ ltr: &utils.LocalTestResource{ Node: config.randomNode, @@ -640,16 +640,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { } pvConfig := makeLocalPVConfig(config, localVolume) var err error - pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) + pv, err = e2epv.CreatePV(ctx, config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) framework.ExpectNoError(err) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if pv == nil { return } ginkgo.By(fmt.Sprintf("Clean PV %s", pv.Name)) - err := config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}) + err := config.client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -662,7 +662,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ) pvc = e2epv.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns) ginkgo.By(fmt.Sprintf("Create a PVC %s", pvc.Name)) - pvc, err = e2epv.CreatePVC(config.client, config.ns, pvc) + pvc, err = e2epv.CreatePVC(ctx, config.client, config.ns, pvc) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count)) podConfig := e2epod.Config{ @@ -674,14 +674,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { pod, err := e2epod.MakeSecPod(&podConfig) framework.ExpectNoError(err) - pod, err = config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = config.client.CoreV1().Pods(config.ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) pods[pod.Name] = pod } ginkgo.By("Wait for all pods are running") const runningTimeout = 5 * time.Minute waitErr := wait.PollImmediate(time.Second, runningTimeout, func() (done bool, err error) { - podsList, err := config.client.CoreV1().Pods(config.ns).List(context.TODO(), metav1.ListOptions{}) + podsList, err := config.client.CoreV1().Pods(config.ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -699,9 +699,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) }) -func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error { +func deletePodAndPVCs(ctx context.Context, config *localTestConfig, pod *v1.Pod) error { framework.Logf("Deleting pod %v", pod.Name) - if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { + if err := config.client.CoreV1().Pods(config.ns).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil { return err } @@ -709,7 +709,7 @@ func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error { for _, vol := range pod.Spec.Volumes { pvcSource := vol.VolumeSource.PersistentVolumeClaim if pvcSource != nil { - if err := e2epv.DeletePersistentVolumeClaim(config.client, pvcSource.ClaimName, config.ns); err != nil { + if err := e2epv.DeletePersistentVolumeClaim(ctx, config.client, pvcSource.ClaimName, config.ns); err != nil { return err } } @@ -719,25 +719,25 @@ func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error { type makeLocalPodWith func(config *localTestConfig, volume *localTestVolume, nodeName string) *v1.Pod -func testPodWithNodeConflict(config *localTestConfig, testVol *localTestVolume, nodeName string, makeLocalPodFunc makeLocalPodWith) { +func testPodWithNodeConflict(ctx context.Context, config *localTestConfig, testVol *localTestVolume, nodeName string, 
makeLocalPodFunc makeLocalPodWith) { ginkgo.By(fmt.Sprintf("local-volume-type: %s", testVol.localVolumeType)) pod := makeLocalPodFunc(config, testVol, nodeName) - pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := config.client.CoreV1().Pods(config.ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, config.client, pod.Name, pod.Namespace) framework.ExpectNoError(err) } // The tests below are run against multiple mount point types // Test two pods at the same time, write from pod1, and read from pod2 -func twoPodsReadWriteTest(f *framework.Framework, config *localTestConfig, testVol *localTestVolume) { +func twoPodsReadWriteTest(ctx context.Context, f *framework.Framework, config *localTestConfig, testVol *localTestVolume) { ginkgo.By("Creating pod1 to write to the PV") - pod1, pod1Err := createLocalPod(config, testVol, nil) + pod1, pod1Err := createLocalPod(ctx, config, testVol, nil) framework.ExpectNoError(pod1Err) - verifyLocalPod(config, testVol, pod1, config.randomNode.Name) + verifyLocalPod(ctx, config, testVol, pod1, config.randomNode.Name) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) @@ -748,9 +748,9 @@ func twoPodsReadWriteTest(f *framework.Framework, config *localTestConfig, testV testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType) ginkgo.By("Creating pod2 to read from the PV") - pod2, pod2Err := createLocalPod(config, testVol, nil) + pod2, pod2Err := createLocalPod(ctx, config, testVol, nil) framework.ExpectNoError(pod2Err) - verifyLocalPod(config, testVol, pod2, config.randomNode.Name) + verifyLocalPod(ctx, config, testVol, pod2, config.randomNode.Name) // testFileContent was written after creating pod1 testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType) @@ -764,17 +764,17 @@ func twoPodsReadWriteTest(f *framework.Framework, config *localTestConfig, testV testReadFileContent(f, volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType) ginkgo.By("Deleting pod1") - e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod1.Name) ginkgo.By("Deleting pod2") - e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod2.Name) } // Test two pods one after other, write from pod1, and read from pod2 -func twoPodsReadWriteSerialTest(f *framework.Framework, config *localTestConfig, testVol *localTestVolume) { +func twoPodsReadWriteSerialTest(ctx context.Context, f *framework.Framework, config *localTestConfig, testVol *localTestVolume) { ginkgo.By("Creating pod1") - pod1, pod1Err := createLocalPod(config, testVol, nil) + pod1, pod1Err := createLocalPod(ctx, config, testVol, nil) framework.ExpectNoError(pod1Err) - verifyLocalPod(config, testVol, pod1, config.randomNode.Name) + verifyLocalPod(ctx, config, testVol, pod1, config.randomNode.Name) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) @@ -785,30 +785,30 @@ func twoPodsReadWriteSerialTest(f *framework.Framework, config *localTestConfig, testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType) ginkgo.By("Deleting pod1") - e2epod.DeletePodOrFail(config.client, config.ns, 
pod1.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod1.Name) ginkgo.By("Creating pod2") - pod2, pod2Err := createLocalPod(config, testVol, nil) + pod2, pod2Err := createLocalPod(ctx, config, testVol, nil) framework.ExpectNoError(pod2Err) - verifyLocalPod(config, testVol, pod2, config.randomNode.Name) + verifyLocalPod(ctx, config, testVol, pod2, config.randomNode.Name) ginkgo.By("Reading in pod2") testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType) ginkgo.By("Deleting pod2") - e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) + e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod2.Name) } // Test creating pod with fsGroup, and check fsGroup is expected fsGroup. -func createPodWithFsGroupTest(config *localTestConfig, testVol *localTestVolume, fsGroup int64, expectedFsGroup int64) *v1.Pod { - pod, err := createLocalPod(config, testVol, &fsGroup) +func createPodWithFsGroupTest(ctx context.Context, config *localTestConfig, testVol *localTestVolume, fsGroup int64, expectedFsGroup int64) *v1.Pod { + pod, err := createLocalPod(ctx, config, testVol, &fsGroup) framework.ExpectNoError(err) _, err = e2eoutput.LookForStringInPodExec(config.ns, pod.Name, []string{"stat", "-c", "%g", volumeDir}, strconv.FormatInt(expectedFsGroup, 10), time.Second*3) framework.ExpectNoError(err, "failed to get expected fsGroup %d on directory %s in pod %s", fsGroup, volumeDir, pod.Name) return pod } -func setupStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMode) { +func setupStorageClass(ctx context.Context, config *localTestConfig, mode *storagev1.VolumeBindingMode) { sc := &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: config.scName, @@ -817,29 +817,29 @@ func setupStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMod VolumeBindingMode: mode, } - _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) + _, err := config.client.StorageV1().StorageClasses().Create(ctx, sc, metav1.CreateOptions{}) framework.ExpectNoError(err) } -func cleanupStorageClass(config *localTestConfig) { - framework.ExpectNoError(config.client.StorageV1().StorageClasses().Delete(context.TODO(), config.scName, metav1.DeleteOptions{})) +func cleanupStorageClass(ctx context.Context, config *localTestConfig) { + framework.ExpectNoError(config.client.StorageV1().StorageClasses().Delete(ctx, config.scName, metav1.DeleteOptions{})) } // podNode wraps RunKubectl to get node where pod is running -func podNodeName(config *localTestConfig, pod *v1.Pod) (string, error) { - runtimePod, runtimePodErr := config.client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) +func podNodeName(ctx context.Context, config *localTestConfig, pod *v1.Pod) (string, error) { + runtimePod, runtimePodErr := config.client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) return runtimePod.Spec.NodeName, runtimePodErr } // setupLocalVolumes sets up directories to use for local PV -func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType, node *v1.Node, count int) []*localTestVolume { +func setupLocalVolumes(ctx context.Context, config *localTestConfig, localVolumeType localVolumeType, node *v1.Node, count int) []*localTestVolume { vols := []*localTestVolume{} for i := 0; i < count; i++ { ltrType, ok := setupLocalVolumeMap[localVolumeType] if !ok { framework.Failf("Invalid localVolumeType: %v", localVolumeType) } - ltr := 
config.ltrMgr.Create(node, ltrType, nil) + ltr := config.ltrMgr.Create(ctx, node, ltrType, nil) vols = append(vols, &localTestVolume{ ltr: ltr, localVolumeType: localVolumeType, @@ -848,10 +848,10 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType, return vols } -func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) { +func cleanupLocalPVCsPVs(ctx context.Context, config *localTestConfig, volumes []*localTestVolume) { for _, volume := range volumes { ginkgo.By("Cleaning up PVC and PV") - errs := e2epv.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc) + errs := e2epv.PVPVCCleanup(ctx, config.client, config.ns, volume.pv, volume.pvc) if len(errs) > 0 { framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) } @@ -859,20 +859,20 @@ func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) { } // Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory -func cleanupLocalVolumes(config *localTestConfig, volumes []*localTestVolume) { - cleanupLocalPVCsPVs(config, volumes) +func cleanupLocalVolumes(ctx context.Context, config *localTestConfig, volumes []*localTestVolume) { + cleanupLocalPVCsPVs(ctx, config, volumes) for _, volume := range volumes { - config.ltrMgr.Remove(volume.ltr) + config.ltrMgr.Remove(ctx, volume.ltr) } } -func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) { - framework.ExpectNoError(e2epv.WaitOnPVandPVC(config.client, config.timeouts, config.ns, volume.pv, volume.pvc)) +func verifyLocalVolume(ctx context.Context, config *localTestConfig, volume *localTestVolume) { + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, config.client, config.timeouts, config.ns, volume.pv, volume.pvc)) } -func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) { - podNodeName, err := podNodeName(config, pod) +func verifyLocalPod(ctx context.Context, config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) { + podNodeName, err := podNodeName(ctx, config, pod) framework.ExpectNoError(err) framework.Logf("pod %q created on Node %q", pod.Name, podNodeName) framework.ExpectEqual(podNodeName, expectedNodeName) @@ -934,20 +934,20 @@ func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) e2epv.P } // Creates a PVC and PV with prebinding -func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mode storagev1.VolumeBindingMode) { +func createLocalPVCsPVs(ctx context.Context, config *localTestConfig, volumes []*localTestVolume, mode storagev1.VolumeBindingMode) { var err error for _, volume := range volumes { pvcConfig := makeLocalPVCConfig(config, volume.localVolumeType) pvConfig := makeLocalPVConfig(config, volume) - volume.pv, volume.pvc, err = e2epv.CreatePVPVC(config.client, config.timeouts, pvConfig, pvcConfig, config.ns, false) + volume.pv, volume.pvc, err = e2epv.CreatePVPVC(ctx, config.client, config.timeouts, pvConfig, pvcConfig, config.ns, false) framework.ExpectNoError(err) } if mode == storagev1.VolumeBindingImmediate { for _, volume := range volumes { - verifyLocalVolume(config, volume) + verifyLocalVolume(ctx, config, volume) } } else { // Verify PVCs are not bound by waiting for phase==bound with a timeout and asserting that we hit the timeout. 
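The next hunk polls PVCs in just this way; elsewhere in this file the loop itself changes from wait.PollImmediate to wait.PollImmediateWithContext so that the poll stops as soon as the test's context is cancelled. A minimal sketch of that context-aware poll, assuming the surrounding imports and an illustrative helper name (waitForClaimBound); the tests themselves use e2epv.WaitForPersistentVolumeClaimPhase for this:

// waitForClaimBound polls until the named PVC is Bound, the timeout expires,
// or ctx is cancelled because the spec was aborted.
func waitForClaimBound(ctx context.Context, c clientset.Interface, ns, claimName string) error {
	return wait.PollImmediateWithContext(ctx, time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
		pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Done once the claim reports Bound; otherwise keep polling.
		return pvc.Status.Phase == v1.ClaimBound, nil
	})
}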
@@ -955,7 +955,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod const bindTimeout = 10 * time.Second waitErr := wait.PollImmediate(time.Second, bindTimeout, func() (done bool, err error) { for _, volume := range volumes { - pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(context.TODO(), volume.pvc.Name, metav1.GetOptions{}) + pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(ctx, volume.pvc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get PVC %s/%s: %v", volume.pvc.Namespace, volume.pvc.Name, err) } @@ -1037,7 +1037,7 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, return } -func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) { +func createLocalPod(ctx context.Context, config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) { ginkgo.By("Creating a pod") podConfig := e2epod.Config{ NS: config.ns, @@ -1045,7 +1045,7 @@ func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *i SeLinuxLabel: selinuxLabel, FsGroup: fsGroup, } - return e2epod.CreateSecPod(config.client, &podConfig, config.timeouts.PodStart) + return e2epod.CreateSecPod(ctx, config.client, &podConfig, config.timeouts.PodStart) } func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string { @@ -1097,6 +1097,7 @@ func podRWCmdExec(f *framework.Framework, pod *v1.Pod, cmd string) string { // Initialize test volume on node // and create local PVC and PV func setupLocalVolumesPVCsPVs( + ctx context.Context, config *localTestConfig, localVolumeType localVolumeType, node *v1.Node, @@ -1104,10 +1105,10 @@ func setupLocalVolumesPVCsPVs( mode storagev1.VolumeBindingMode) []*localTestVolume { ginkgo.By("Initializing test volumes") - testVols := setupLocalVolumes(config, localVolumeType, node, count) + testVols := setupLocalVolumes(ctx, config, localVolumeType, node, count) ginkgo.By("Creating local PVCs and PVs") - createLocalPVCsPVs(config, testVols, mode) + createLocalPVCsPVs(ctx, config, testVols, mode) return testVols } @@ -1135,7 +1136,7 @@ func newLocalClaimWithName(config *localTestConfig, name string) *v1.PersistentV return &claim } -func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount int, anti, parallel bool) *appsv1.StatefulSet { +func createStatefulSet(ctx context.Context, config *localTestConfig, ssReplicas int32, volumeCount int, anti, parallel bool) *appsv1.StatefulSet { mounts := []v1.VolumeMount{} claims := []v1.PersistentVolumeClaim{} for i := 0; i < volumeCount; i++ { @@ -1206,15 +1207,15 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in spec.Spec.PodManagementPolicy = appsv1.ParallelPodManagement } - ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(context.TODO(), spec, metav1.CreateOptions{}) + ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(ctx, spec, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(config.client, ssReplicas, ss) + e2estatefulset.WaitForRunningAndReady(ctx, config.client, ssReplicas, ss) return ss } -func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti bool) { - pods := e2estatefulset.GetPodList(config.client, ss) +func validateStatefulSet(ctx context.Context, config *localTestConfig, ss *appsv1.StatefulSet, anti 
bool) { + pods := e2estatefulset.GetPodList(ctx, config.client, ss) nodes := sets.NewString() for _, pod := range pods.Items { @@ -1234,7 +1235,7 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti b for _, volume := range pod.Spec.Volumes { pvcSource := volume.VolumeSource.PersistentVolumeClaim if pvcSource != nil { - err := e2epv.WaitForPersistentVolumeClaimPhase( + err := e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, config.client, config.ns, pvcSource.ClaimName, framework.Poll, time.Second) framework.ExpectNoError(err) } @@ -1244,9 +1245,9 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti b // SkipUnlessLocalSSDExists takes in an ssdInterface (scsi/nvme) and a filesystemType (fs/block) // and skips if a disk of that type does not exist on the node -func SkipUnlessLocalSSDExists(config *localTestConfig, ssdInterface, filesystemType string, node *v1.Node) { +func SkipUnlessLocalSSDExists(ctx context.Context, config *localTestConfig, ssdInterface, filesystemType string, node *v1.Node) { ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType) - res, err := config.hostExec.Execute(ssdCmd, node) + res, err := config.hostExec.Execute(ctx, ssdCmd, node) utils.LogResult(res) framework.ExpectNoError(err) num, err := strconv.Atoi(strings.TrimSpace(res.Stdout)) diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 8312175cc23..09162b111a7 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -41,19 +41,19 @@ import ( // Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's // phase. Note: the PV is deleted in the AfterEach, not here. -func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { +func completeTest(ctx context.Context, f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { // 1. verify that the PV and PVC have bound correctly ginkgo.By("Validating the PV-PVC binding") - framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc)) + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc)) // 2. create the nfs writer pod, test if the write was successful, // then delete the pod and verify that it was deleted ginkgo.By("Checking pod has write access to PersistentVolume") - framework.ExpectNoError(createWaitAndDeletePod(c, f.Timeouts, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")) + framework.ExpectNoError(createWaitAndDeletePod(ctx, c, f.Timeouts, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")) // 3. delete the PVC, wait for PV to become "Released" ginkgo.By("Deleting the PVC to invoke the reclaim policy.") - framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeReleased)) + framework.ExpectNoError(e2epv.DeletePVCandValidatePV(ctx, c, f.Timeouts, ns, pvc, pv, v1.VolumeReleased)) } // Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate @@ -62,13 +62,13 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv * // Note: this func is serialized, we wait for each pod to be deleted before creating the // // next pod. Adding concurrency is a TODO item. 
-func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols e2epv.PVMap, claims e2epv.PVCMap, expectPhase v1.PersistentVolumePhase) error { +func completeMultiTest(ctx context.Context, f *framework.Framework, c clientset.Interface, ns string, pvols e2epv.PVMap, claims e2epv.PVCMap, expectPhase v1.PersistentVolumePhase) error { var err error // 1. verify each PV permits write access to a client pod ginkgo.By("Checking pod has write access to PersistentVolumes") for pvcKey := range claims { - pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(context.TODO(), pvcKey.Name, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(ctx, pvcKey.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("error getting pvc %q: %v", pvcKey.Name, err) } @@ -81,14 +81,14 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName) } // TODO: currently a serialized test of each PV - if err = createWaitAndDeletePod(c, f.Timeouts, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil { + if err = createWaitAndDeletePod(ctx, c, f.Timeouts, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil { return err } } // 2. delete each PVC, wait for its bound PV to reach `expectedPhase` ginkgo.By("Deleting PVCs to invoke reclaim policy") - if err = e2epv.DeletePVCandValidatePVGroup(c, f.Timeouts, ns, pvols, claims, expectPhase); err != nil { + if err = e2epv.DeletePVCandValidatePVGroup(ctx, c, f.Timeouts, ns, pvols, claims, expectPhase); err != nil { return err } return nil @@ -129,8 +129,8 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { serverHost string ) - ginkgo.BeforeEach(func() { - _, nfsServerPod, serverHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) + ginkgo.BeforeEach(func(ctx context.Context) { + _, nfsServerPod, serverHost = e2evolume.NewNFSServer(ctx, c, ns, []string{"-G", "777", "/exports"}) pvConfig = e2epv.PersistentVolumeConfig{ NamePrefix: "nfs-", Labels: volLabel, @@ -149,17 +149,17 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { } }) - ginkgo.AfterEach(func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name) + ginkgo.AfterEach(func(ctx context.Context) { + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name) pv, pvc = nil, nil pvConfig, pvcConfig = e2epv.PersistentVolumeConfig{}, e2epv.PersistentVolumeClaimConfig{} }) ginkgo.Context("with Single PV - PVC pairs", func() { // Note: this is the only code where the pv is deleted. - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { framework.Logf("AfterEach: Cleaning up test resources.") - if errs := e2epv.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { + if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv, pvc); len(errs) > 0 { framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } }) @@ -170,36 +170,36 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // contains the claim. Verify that the PV and PVC bind correctly, and // that the pod can write to the nfs volume. 
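The specs that follow hand their ctx straight to completeTest, for the reason given in the commit message: a wait that ignores the context keeps running after the spec is aborted. A minimal sketch of a context-aware wait loop, with illustrative names (waitUntil, every, check) that are not framework API:

// waitUntil runs check until it reports done, returns an error, or the
// test's context is cancelled, whichever comes first.
func waitUntil(ctx context.Context, every time.Duration, check func(context.Context) (bool, error)) error {
	ticker := time.NewTicker(every)
	defer ticker.Stop()
	for {
		if done, err := check(ctx); err != nil || done {
			return err
		}
		select {
		case <-ctx.Done():
			// The spec was aborted or timed out: stop immediately instead of
			// sitting out the rest of the wait.
			return ctx.Err()
		case <-ticker.C:
		}
	}
}

The wait.Poll*WithContext helpers used elsewhere in the patch follow the same shape once they are handed the Ginkgo context.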
ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func(ctx context.Context) { - pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) + pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) - completeTest(f, c, ns, pv, pvc) + completeTest(ctx, f, c, ns, pv, pvc) }) // Create a claim first, then a nfs PV that matches the claim, and a // pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. ginkgo.It("create a PVC and non-pre-bound PV: test write access", func(ctx context.Context) { - pv, pvc, err = e2epv.CreatePVCPV(c, f.Timeouts, pvConfig, pvcConfig, ns, false) + pv, pvc, err = e2epv.CreatePVCPV(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) - completeTest(f, c, ns, pv, pvc) + completeTest(ctx, f, c, ns, pv, pvc) }) // Create a claim first, then a pre-bound nfs PV that matches the claim, // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. ginkgo.It("create a PVC and a pre-bound PV: test write access", func(ctx context.Context) { - pv, pvc, err = e2epv.CreatePVCPV(c, f.Timeouts, pvConfig, pvcConfig, ns, true) + pv, pvc, err = e2epv.CreatePVCPV(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) - completeTest(f, c, ns, pv, pvc) + completeTest(ctx, f, c, ns, pv, pvc) }) // Create a nfs PV first, then a pre-bound PVC that matches the PV, // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. ginkgo.It("create a PV and a pre-bound PVC: test write access", func(ctx context.Context) { - pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true) + pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) - completeTest(f, c, ns, pv, pvc) + completeTest(ctx, f, c, ns, pv, pvc) }) }) @@ -219,9 +219,9 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { var pvols e2epv.PVMap var claims e2epv.PVCMap - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols)) - errs := e2epv.PVPVCMapCleanup(c, ns, pvols, claims) + errs := e2epv.PVPVCMapCleanup(ctx, c, ns, pvols, claims) if len(errs) > 0 { errmsg := []string{} for _, e := range errs { @@ -235,30 +235,30 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Note: PVs are created before claims and no pre-binding ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func(ctx context.Context) { numPVs, numPVCs := 2, 4 - pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) + pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) - framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true)) - framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)) + framework.ExpectNoError(e2epv.WaitAndVerifyBinds(ctx, c, f.Timeouts, ns, pvols, claims, true)) + framework.ExpectNoError(completeMultiTest(ctx, f, c, ns, pvols, claims, v1.VolumeReleased)) }) // Create 3 PVs and 3 PVCs. 
// Note: PVs are created before claims and no pre-binding ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func(ctx context.Context) { numPVs, numPVCs := 3, 3 - pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) + pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) - framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true)) - framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)) + framework.ExpectNoError(e2epv.WaitAndVerifyBinds(ctx, c, f.Timeouts, ns, pvols, claims, true)) + framework.ExpectNoError(completeMultiTest(ctx, f, c, ns, pvols, claims, v1.VolumeReleased)) }) // Create 4 PVs and 2 PVCs. // Note: PVs are created before claims and no pre-binding. ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func(ctx context.Context) { numPVs, numPVCs := 4, 2 - pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) + pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) - framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true)) - framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)) + framework.ExpectNoError(e2epv.WaitAndVerifyBinds(ctx, c, f.Timeouts, ns, pvols, claims, true)) + framework.ExpectNoError(completeMultiTest(ctx, f, c, ns, pvols, claims, v1.VolumeReleased)) }) }) @@ -266,16 +266,16 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Recycler, this entire context can be removed without affecting the test suite or leaving behind // dead code. ginkgo.Context("when invoking the Recycle reclaim policy", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle - pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) + pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC") - framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed") + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed") }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { framework.Logf("AfterEach: Cleaning up test resources.") - if errs := e2epv.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { + if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv, pvc); len(errs) > 0 { framework.Failf("AfterEach: Failed to delete PVC and/or PV. 
Errors: %v", utilerrors.NewAggregate(errs)) } }) @@ -286,34 +286,34 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func(ctx context.Context) { ginkgo.By("Writing to the volume.") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')") - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, f.Timeouts.PodStart)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, f.Timeouts.PodStart)) ginkgo.By("Deleting the claim") - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) - framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) + framework.ExpectNoError(e2epv.DeletePVCandValidatePV(ctx, c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable)) ginkgo.By("Re-mounting the volume.") pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns) - pvc, err = e2epv.CreatePVC(c, ns, pvc) + pvc, err = e2epv.CreatePVC(ctx, c, ns, pvc) framework.ExpectNoError(err) - framework.ExpectNoError(e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name) + framework.ExpectNoError(e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name) // If a file is detected in /mnt, fail the pod and do not restart it. ginkgo.By("Verifying the mount has been cleaned.") mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount)) - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, f.Timeouts.PodStart)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, f.Timeouts.PodStart)) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) framework.Logf("Pod exited without failure; the volume has been recycled.") // Delete the PVC and wait for the recycler to finish before the NFS server gets shutdown during cleanup. 
framework.Logf("Removing second PVC, waiting for the recycler to finish before cleanup.") - framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable)) + framework.ExpectNoError(e2epv.DeletePVCandValidatePV(ctx, c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable)) pvc = nil }) }) @@ -327,13 +327,13 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.Describe("Default StorageClass [LinuxOnly]", func() { ginkgo.Context("pods that use multiple volumes", func() { - ginkgo.AfterEach(func() { - e2estatefulset.DeleteAllStatefulSets(c, ns) + ginkgo.AfterEach(func(ctx context.Context) { + e2estatefulset.DeleteAllStatefulSets(ctx, c, ns) }) ginkgo.It("should be reschedulable [Slow]", func(ctx context.Context) { // Only run on providers with default storageclass - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) numVols := 4 @@ -366,16 +366,16 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { } spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe) - ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, spec, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(c, 1, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, 1, ss) ginkgo.By("Deleting the StatefulSet but not the volumes") // Scale down to 0 first so that the Delete is quick - ss, err = e2estatefulset.Scale(c, ss, 0) + ss, err = e2estatefulset.Scale(ctx, c, ss, 0) framework.ExpectNoError(err) - e2estatefulset.WaitForStatusReplicas(c, ss, 0) - err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{}) + e2estatefulset.WaitForStatusReplicas(ctx, c, ss, 0) + err = c.AppsV1().StatefulSets(ns).Delete(ctx, ss.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating a new Statefulset and validating the data") @@ -386,9 +386,9 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { validateCmd += "&& sleep 10000" spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe) - ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Create(ctx, spec, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2estatefulset.WaitForRunningAndReady(c, 1, ss) + e2estatefulset.WaitForRunningAndReady(ctx, c, 1, ss) }) }) }) @@ -445,21 +445,21 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v // Note: need named return value so that the err assignment in the defer sets the returned error. // // Has been shown to be necessary using Go 1.7. 
-func createWaitAndDeletePod(c clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) { +func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) { framework.Logf("Creating nfs test pod") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command) - runPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("pod Create API error: %v", err) } defer func() { - delErr := e2epod.DeletePodWithWait(c, runPod) + delErr := e2epod.DeletePodWithWait(ctx, c, runPod) if err == nil { // don't override previous err value err = delErr // assign to returned err, can be nil } }() - err = testPodSuccessOrFail(c, t, ns, runPod) + err = testPodSuccessOrFail(ctx, c, t, ns, runPod) if err != nil { return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err) } @@ -467,9 +467,9 @@ func createWaitAndDeletePod(c clientset.Interface, t *framework.TimeoutContext, } // testPodSuccessOrFail tests whether the pod's exit code is zero. -func testPodSuccessOrFail(c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error { +func testPodSuccessOrFail(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error { framework.Logf("Pod should terminate with exitcode 0 (success)") - if err := e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, t.PodStart); err != nil { + if err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, t.PodStart); err != nil { return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err) } framework.Logf("Pod %v succeeded ", pod.Name) diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 6a2210941a7..4e24f2c8a57 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -51,10 +51,10 @@ var _ = utils.SIGDescribe("PV Protection", func() { f := framework.NewDefaultFramework("pv-protection") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { client = f.ClientSet nameSpace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) // Enforce binding only within test space via selector labels volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace} @@ -80,58 +80,58 @@ var _ = utils.SIGDescribe("PV Protection", func() { // make the pv definitions pv = e2epv.MakePersistentVolume(pvConfig) // create the PV - pv, err = client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) + pv, err = client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating PV") ginkgo.By("Waiting for PV to enter phase Available") - framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second)) + framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second)) ginkgo.By("Checking that PV Protection finalizer is set") - pv, err 
= client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + pv, err = client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While getting PV status") framework.ExpectEqual(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil), true, "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { framework.Logf("AfterEach: Cleaning up test resources.") - if errs := e2epv.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 { + if errs := e2epv.PVPVCCleanup(ctx, client, nameSpace, pv, pvc); len(errs) > 0 { framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } }) ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func(ctx context.Context) { ginkgo.By("Deleting the PV") - err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") - err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, f.Timeouts.PVDelete) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, f.Timeouts.PVDelete) framework.ExpectNoError(err, "waiting for PV to be deleted") }) ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func(ctx context.Context) { ginkgo.By("Creating a PVC") pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, nameSpace) - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating PVC") ginkgo.By("Waiting for PVC to become Bound") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC") - err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") ginkgo.By("Checking that the PV status is Terminating") - pv, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + pv, err = client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PV status") framework.ExpectNotEqual(pv.ObjectMeta.DeletionTimestamp, nil) ginkgo.By("Deleting the PVC that is bound to the PV") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC") - err = 
e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, f.Timeouts.PVDelete) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, f.Timeouts.PVDelete) framework.ExpectNoError(err, "waiting for PV to be deleted") }) }) diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index ec8d682e7de..d501ff8a4c5 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -45,10 +45,10 @@ const ( ) // waitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first. -func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { +func waitForPersistentVolumeClaimDeleted(ctx context.Context, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{}) + _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns) @@ -72,14 +72,14 @@ var _ = utils.SIGDescribe("PVC Protection", func() { f := framework.NewDefaultFramework("pvc-protection") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { client = f.ClientSet nameSpace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) ginkgo.By("Creating a PVC") prefix := "pvc-protection" - e2epv.SkipIfNoDefaultStorageClass(client) + e2epv.SkipIfNoDefaultStorageClass(ctx, client) t := testsuites.StorageClassTest{ Timeouts: f.Timeouts, ClaimSize: "1Gi", @@ -89,91 +89,91 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ClaimSize: t.ClaimSize, VolumeMode: &t.VolumeMode, }, nameSpace) - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating PVC") pvcCreatedAndNotDeleted = true ginkgo.By("Creating a Pod that becomes Running and therefore is actively using the PVC") pvcClaims := []*v1.PersistentVolumeClaim{pvc} - pod, err = e2epod.CreatePod(client, nameSpace, nil, pvcClaims, false, "") + pod, err = e2epod.CreatePod(ctx, client, nameSpace, nil, pvcClaims, false, "") framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running") ginkgo.By("Waiting for PVC to become Bound") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) ginkgo.By("Checking that PVC Protection finalizer is set") - pvc, err = 
client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While getting PVC status") framework.ExpectEqual(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil), true, "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if pvcCreatedAndNotDeleted { - e2epv.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace) + e2epv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, nameSpace) } }) ginkgo.It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func(ctx context.Context) { ginkgo.By("Deleting the pod using the PVC") - err = e2epod.DeletePodWithWait(client, pod) + err = e2epod.DeletePodWithWait(ctx, client, pod) framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Deleting the PVC") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") - waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) + waitForPersistentVolumeClaimDeleted(ctx, client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) pvcCreatedAndNotDeleted = false }) ginkgo.It("Verify that PVC in active use by a pod is not removed immediately", func(ctx context.Context) { ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") ginkgo.By("Checking that the PVC status is Terminating") - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) ginkgo.By("Deleting the pod that uses the PVC") - err = e2epod.DeletePodWithWait(client, pod) + err = e2epod.DeletePodWithWait(ctx, client, pod) framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") - waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) + waitForPersistentVolumeClaimDeleted(ctx, client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) pvcCreatedAndNotDeleted = false }) ginkgo.It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func(ctx context.Context) { ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0)) + err = 
client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") ginkgo.By("Checking that the PVC status is Terminating") - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted") - secondPod, err2 := e2epod.CreateUnschedulablePod(client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") + secondPod, err2 := e2epod.CreateUnschedulablePod(ctx, client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable") ginkgo.By("Deleting the second pod that uses the PVC that is being deleted") - err = e2epod.DeletePodWithWait(client, secondPod) + err = e2epod.DeletePodWithWait(ctx, client, secondPod) framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Checking again that the PVC status is Terminating") - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) ginkgo.By("Deleting the first pod that uses the PVC") - err = e2epod.DeletePodWithWait(client, pod) + err = e2epod.DeletePodWithWait(ctx, client, pod) framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") - waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) + waitForPersistentVolumeClaimDeleted(ctx, client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) pvcCreatedAndNotDeleted = false }) }) diff --git a/test/e2e/storage/pvc_storageclass.go b/test/e2e/storage/pvc_storageclass.go index fe55e2d2eb7..c7a903e5d37 100644 --- a/test/e2e/storage/pvc_storageclass.go +++ b/test/e2e/storage/pvc_storageclass.go @@ -64,7 +64,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() { ginkgo.It("should assign default SC to PVCs that have no SC set", func(ctx context.Context) { // Temporarily set all default storage classes as non-default - restoreClasses := temporarilyUnsetDefaultClasses(client) + restoreClasses := temporarilyUnsetDefaultClasses(ctx, client) defer restoreClasses() // Create PVC with nil SC @@ -73,11 +73,11 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() { ClaimSize: t.ClaimSize, VolumeMode: &t.VolumeMode, }, namespace) - pvc, err = client.CoreV1().PersistentVolumeClaims(pvcObj.Namespace).Create(context.TODO(), pvcObj, metav1.CreateOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvcObj.Namespace).Create(ctx, pvcObj, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating PVC") defer func(pvc *v1.PersistentVolumeClaim) { // Remove test PVC - err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, 
metav1.DeleteOptions{}) + err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Error cleaning up PVC") }(pvc) @@ -85,7 +85,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() { storageClass := testsuites.SetupStorageClass(ctx, client, makeStorageClass(prefixSC)) // Wait for PVC to get updated with the new default SC - pvc, err = waitForPVCStorageClass(client, namespace, pvc.Name, storageClass.Name, f.Timeouts.ClaimBound) + pvc, err = waitForPVCStorageClass(ctx, client, namespace, pvc.Name, storageClass.Name, f.Timeouts.ClaimBound) framework.ExpectNoError(err, "Error updating PVC with the correct storage class") // Create PV with specific class @@ -99,15 +99,15 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() { }, }, }) - _, err = e2epv.CreatePV(client, f.Timeouts, pv) + _, err = e2epv.CreatePV(ctx, client, f.Timeouts, pv) framework.ExpectNoError(err, "Error creating pv %v", err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolume, client, pv.Name) // Verify the PVC is bound and has the new default SC claimNames := []string{pvc.Name} - err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, false) + err = e2epv.WaitForPersistentVolumeClaimsPhase(ctx, v1.ClaimBound, client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, false) framework.ExpectNoError(err) - updatedPVC, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + updatedPVC, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(*updatedPVC.Spec.StorageClassName, storageClass.Name, "Expected PVC %v to have StorageClass %v, but it has StorageClass %v instead", updatedPVC.Name, prefixSC, updatedPVC.Spec.StorageClassName) framework.Logf("Success - PersistentVolumeClaim %s got updated retroactively with StorageClass %v", updatedPVC.Name, storageClass.Name) @@ -130,8 +130,8 @@ func makeStorageClass(prefixSC string) *storagev1.StorageClass { } } -func temporarilyUnsetDefaultClasses(client clientset.Interface) func() { - classes, err := client.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{}) +func temporarilyUnsetDefaultClasses(ctx context.Context, client clientset.Interface) func() { + classes, err := client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) var changedClasses []storagev1.StorageClass @@ -140,7 +140,7 @@ func temporarilyUnsetDefaultClasses(client clientset.Interface) func() { if sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] == "true" { changedClasses = append(changedClasses, sc) sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = "false" - _, err := client.StorageV1().StorageClasses().Update(context.TODO(), &sc, metav1.UpdateOptions{}) + _, err := client.StorageV1().StorageClasses().Update(ctx, &sc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } } @@ -148,19 +148,19 @@ func temporarilyUnsetDefaultClasses(client clientset.Interface) func() { return func() { for _, sc := range changedClasses { sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = "true" - _, err := client.StorageV1().StorageClasses().Update(context.TODO(), &sc, metav1.UpdateOptions{}) + _, err := client.StorageV1().StorageClasses().Update(ctx, 
&sc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } } } -func waitForPVCStorageClass(c clientset.Interface, namespace, pvcName, scName string, timeout time.Duration) (*v1.PersistentVolumeClaim, error) { +func waitForPVCStorageClass(ctx context.Context, c clientset.Interface, namespace, pvcName, scName string, timeout time.Duration) (*v1.PersistentVolumeClaim, error) { var watchedPVC *v1.PersistentVolumeClaim - err := wait.Poll(1*time.Second, timeout, func() (bool, error) { + err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) { var err error - watchedPVC, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + watchedPVC, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { return true, err } diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 55875144bb2..7e2aad92e37 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -67,12 +67,12 @@ var _ = utils.SIGDescribe("Regional PD", func() { var c clientset.Interface var ns string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { c = f.ClientSet ns = f.Namespace.Name e2eskipper.SkipUnlessProviderIs("gce", "gke") - e2eskipper.SkipUnlessMultizone(c) + e2eskipper.SkipUnlessMultizone(ctx, c) }) ginkgo.Describe("RegionalPD", func() { @@ -95,13 +95,13 @@ var _ = utils.SIGDescribe("Regional PD", func() { }) ginkgo.It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func(ctx context.Context) { - testZonalFailover(c, ns) + testZonalFailover(ctx, c, ns) }) }) }) func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) { - cloudZones := getTwoRandomZones(c) + cloudZones := getTwoRandomZones(ctx, c) // This test checks that dynamic provisioning can provision a volume // that can be used to persist data among pods. 
@@ -118,7 +118,7 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame }, ClaimSize: repdMinSize, ExpectedSize: repdMinSize, - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, t, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil()) @@ -140,13 +140,13 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame }, ClaimSize: repdMinSize, ExpectedSize: repdMinSize, - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, t, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil()) err := checkGCEPD(volume, "pd-standard") framework.ExpectNoError(err, "checkGCEPD") - zones, err := e2enode.GetClusterZones(c) + zones, err := e2enode.GetClusterZones(ctx, c) framework.ExpectNoError(err, "GetClusterZones") err = verifyZonesInPV(volume, zones, false /* match */) framework.ExpectNoError(err, "verifyZonesInPV") @@ -168,8 +168,8 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame } } -func testZonalFailover(c clientset.Interface, ns string) { - cloudZones := getTwoRandomZones(c) +func testZonalFailover(ctx context.Context, c clientset.Interface, ns string) { + cloudZones := getTwoRandomZones(ctx, c) testSpec := testsuites.StorageClassTest{ Name: "Regional PD Failover on GCE/GKE", CloudProviders: []string{"gce", "gke"}, @@ -193,18 +193,18 @@ func testZonalFailover(c clientset.Interface, ns string) { statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns) ginkgo.By("creating a StorageClass " + class.Name) - _, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) + _, err := c.StorageV1().StorageClasses().Create(ctx, class, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, metav1.DeleteOptions{}), + framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(ctx, class.Name, metav1.DeleteOptions{}), "Error deleting StorageClass %s", class.Name) }() ginkgo.By("creating a StatefulSet") - _, err = c.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{}) + _, err = c.CoreV1().Services(ns).Create(ctx, service, metav1.CreateOptions{}) framework.ExpectNoError(err) - _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), statefulSet, metav1.CreateOptions{}) + _, err = c.AppsV1().StatefulSets(ns).Create(ctx, statefulSet, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { @@ -214,43 +214,43 @@ func testZonalFailover(c clientset.Interface, ns string) { "Error deleting StatefulSet %s", statefulSet.Name) framework.Logf("deleting claims in namespace %s", ns) - pvc := getPVC(c, ns, regionalPDLabels) + pvc := getPVC(ctx, c, ns, regionalPDLabels) framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{}), "Error deleting claim %s.", pvc.Name) if pvc.Spec.VolumeName != "" { - err = e2epv.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout) if err != nil { 
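
ginkgo.DeferCleanup above registers a closure whose first parameter is a context; Ginkgo can also inject the cleanup context when a plain function taking a context is registered together with its remaining arguments, which is how calls such as DeferCleanup(e2epv.DeletePersistentVolume, client, pv.Name) fit the new signatures. A hedged sketch with an illustrative stand-in helper:

    package e2esketch

    import (
    	"context"
    	"fmt"

    	"github.com/onsi/ginkgo/v2"
    )

    // deleteVolume stands in for a context-aware cleanup helper such as
    // e2epv.DeletePersistentVolume; its name and behavior are illustrative.
    func deleteVolume(ctx context.Context, name string) {
    	if ctx.Err() != nil {
    		fmt.Printf("context done, skipping delete of %s\n", name)
    		return
    	}
    	fmt.Printf("deleting volume %s\n", name)
    }

    var _ = ginkgo.It("registers context-aware cleanup", func(ctx context.Context) {
    	// The cleanup context is supplied by Ginkgo at cleanup time; the
    	// remaining arguments are passed through to the helper.
    	ginkgo.DeferCleanup(deleteVolume, "pv-example")

    	// An inline closure works the same way and can capture test state.
    	ginkgo.DeferCleanup(func(ctx context.Context) {
    		deleteVolume(ctx, "pv-example-2")
    	})
    })
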
framework.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName) } } }) - err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout) + err = waitForStatefulSetReplicasReady(ctx, statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout) if err != nil { - pod := getPod(c, ns, regionalPDLabels) + pod := getPod(ctx, c, ns, regionalPDLabels) if !podutil.IsPodReadyConditionTrue(pod.Status) { framework.Failf("The statefulset pod %s was expected to be ready, instead has the following conditions: %v", pod.Name, pod.Status.Conditions) } framework.ExpectNoError(err) } - pvc := getPVC(c, ns, regionalPDLabels) + pvc := getPVC(ctx, c, ns, regionalPDLabels) ginkgo.By("getting zone information from pod") - pod := getPod(c, ns, regionalPDLabels) + pod := getPod(ctx, c, ns, regionalPDLabels) nodeName := pod.Spec.NodeName - node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) podZone := node.Labels[v1.LabelTopologyZone] ginkgo.By("tainting nodes in the zone the pod is scheduled in") selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelTopologyZone: podZone})) - nodesInZone, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + nodesInZone, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err) - addTaint(c, ns, nodesInZone.Items, podZone) + addTaint(ctx, c, ns, nodesInZone.Items, podZone) ginkgo.By("deleting StatefulSet pod") - err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(ctx, pod.Name, metav1.DeleteOptions{}) // Verify the pod is scheduled in the other zone. 
ginkgo.By("verifying the pod is scheduled in a different zone.") @@ -260,10 +260,10 @@ func testZonalFailover(c clientset.Interface, ns string) { } else { otherZone = cloudZones[0] } - waitErr := wait.PollImmediate(framework.Poll, statefulSetReadyTimeout, func() (bool, error) { + waitErr := wait.PollImmediateWithContext(ctx, framework.Poll, statefulSetReadyTimeout, func(ctx context.Context) (bool, error) { framework.Logf("Checking whether new pod is scheduled in zone %q", otherZone) - pod := getPod(c, ns, regionalPDLabels) - node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) + pod := getPod(ctx, c, ns, regionalPDLabels) + node, err := c.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{}) if err != nil { return false, nil } @@ -272,9 +272,9 @@ func testZonalFailover(c clientset.Interface, ns string) { }) framework.ExpectNoError(waitErr, "Error waiting for pod to be scheduled in a different zone (%q): %v", otherZone, err) - err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout) + err = waitForStatefulSetReplicasReady(ctx, statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout) if err != nil { - pod := getPod(c, ns, regionalPDLabels) + pod := getPod(ctx, c, ns, regionalPDLabels) if !podutil.IsPodReadyConditionTrue(pod.Status) { framework.Failf("The statefulset pod %s was expected to be ready, instead has the following conditions: %v", pod.Name, pod.Status.Conditions) } @@ -282,10 +282,10 @@ func testZonalFailover(c clientset.Interface, ns string) { } ginkgo.By("verifying the same PVC is used by the new pod") - framework.ExpectEqual(getPVC(c, ns, regionalPDLabels).Name, pvc.Name, "The same PVC should be used after failover.") + framework.ExpectEqual(getPVC(ctx, c, ns, regionalPDLabels).Name, pvc.Name, "The same PVC should be used after failover.") ginkgo.By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.") - logs, err := e2epod.GetPodLogs(c, ns, pod.Name, "") + logs, err := e2epod.GetPodLogs(ctx, c, ns, pod.Name, "") framework.ExpectNoError(err, "Error getting logs from pod %s in namespace %s", pod.Name, ns) lineCount := len(strings.Split(strings.TrimSpace(logs), "\n")) @@ -294,7 +294,7 @@ func testZonalFailover(c clientset.Interface, ns string) { } -func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) { +func addTaint(ctx context.Context, c clientset.Interface, ns string, nodes []v1.Node, podZone string) { for _, node := range nodes { oldData, err := json.Marshal(node) framework.ExpectNoError(err) @@ -314,7 +314,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) reversePatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{}) framework.ExpectNoError(err) - _, err = c.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = c.CoreV1().Nodes().Patch(ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) framework.ExpectNoError(err) nodeName := node.Name @@ -381,7 +381,7 @@ func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, n suffix := "topo-regional" test.Client = c test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix)) - zones := getTwoRandomZones(c) + zones := getTwoRandomZones(ctx, c) addAllowedTopologiesToStorageClass(c, test.Class, 
zones) test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ NamePrefix: pvcName, @@ -410,7 +410,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clie suffix := "topo-delayed-regional" test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix)) - topoZones := getTwoRandomZones(c) + topoZones := getTwoRandomZones(ctx, c) addAllowedTopologiesToStorageClass(c, test.Class, topoZones) var claims []*v1.PersistentVolumeClaim for i := 0; i < pvcCount; i++ { @@ -444,20 +444,20 @@ func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clie } } -func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.PersistentVolumeClaim { +func getPVC(ctx context.Context, c clientset.Interface, ns string, pvcLabels map[string]string) *v1.PersistentVolumeClaim { selector := labels.Set(pvcLabels).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), options) + pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, options) framework.ExpectNoError(err) framework.ExpectEqual(len(pvcList.Items), 1, "There should be exactly 1 PVC matched.") return &pvcList.Items[0] } -func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.Pod { +func getPod(ctx context.Context, c clientset.Interface, ns string, podLabels map[string]string) *v1.Pod { selector := labels.Set(podLabels).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.CoreV1().Pods(ns).List(context.TODO(), options) + podList, err := c.CoreV1().Pods(ns).List(ctx, options) framework.ExpectNoError(err) framework.ExpectEqual(len(podList.Items), 1, "There should be exactly 1 pod matched.") @@ -556,8 +556,8 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec { } } -func getTwoRandomZones(c clientset.Interface) []string { - zones, err := e2enode.GetClusterZones(c) +func getTwoRandomZones(ctx context.Context, c clientset.Interface) []string { + zones, err := e2enode.GetClusterZones(ctx, c) framework.ExpectNoError(err) gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2), "The test should only be run in multizone clusters.") @@ -640,10 +640,10 @@ func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, } // waitForStatefulSetReplicasReady waits for all replicas of a StatefulSet to become ready or until timeout occurs, whichever comes first. 
-func waitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Interface, Poll, timeout time.Duration) error { +func waitForStatefulSetReplicasReady(ctx context.Context, statefulSetName, ns string, c clientset.Interface, Poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for StatefulSet %s to have all replicas ready", timeout, statefulSetName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - sts, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), statefulSetName, metav1.GetOptions{}) + sts, err := c.AppsV1().StatefulSets(ns).Get(ctx, statefulSetName, metav1.GetOptions{}) if err != nil { framework.Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, Poll, err) continue diff --git a/test/e2e/storage/subpath.go b/test/e2e/storage/subpath.go index eca7b7851c2..3045384370a 100644 --- a/test/e2e/storage/subpath.go +++ b/test/e2e/storage/subpath.go @@ -37,16 +37,16 @@ var _ = utils.SIGDescribe("Subpath", func() { var err error var privilegedSecurityContext bool = false - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Setting up data") secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}} - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "while creating secret") } configmap := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap"}, Data: map[string]string{"configmap-key": "configmap-value"}} - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configmap, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "while creating configmap") } @@ -59,7 +59,7 @@ var _ = utils.SIGDescribe("Subpath", func() { */ framework.ConformanceIt("should support subpaths with secret pod", func(ctx context.Context) { pod := testsuites.SubpathTestPod(f, "secret-key", "secret", &v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}}, privilegedSecurityContext) - testsuites.TestBasicSubpath(f, "secret-value", pod) + testsuites.TestBasicSubpath(ctx, f, "secret-value", pod) }) /* @@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("Subpath", func() { */ framework.ConformanceIt("should support subpaths with configmap pod", func(ctx context.Context) { pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext) - testsuites.TestBasicSubpath(f, "configmap-value", pod) + testsuites.TestBasicSubpath(ctx, f, "configmap-value", pod) }) /* @@ -81,7 +81,7 @@ var _ = utils.SIGDescribe("Subpath", func() { pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext) file := "/etc/resolv.conf" pod.Spec.Containers[0].VolumeMounts[0].MountPath = file - testsuites.TestBasicSubpathFile(f, "configmap-value", pod, file) + testsuites.TestBasicSubpathFile(ctx, f, "configmap-value", pod, file) 
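
The subpath BeforeEach above passes the node's context straight into the client-go Create calls, which have taken a context as their first argument since client-go 0.18. A minimal sketch of that call shape outside the framework (the namespace handling and object names are illustrative):

    package e2esketch

    import (
    	"context"

    	v1 "k8s.io/api/core/v1"
    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // ensureSecret creates the secret unless it already exists, forwarding the
    // caller's context so an aborted test does not leave the request pending.
    func ensureSecret(ctx context.Context, c kubernetes.Interface, ns string) error {
    	secret := &v1.Secret{
    		ObjectMeta: metav1.ObjectMeta{Name: "my-secret"},
    		Data:       map[string][]byte{"secret-key": []byte("secret-value")},
    	}
    	_, err := c.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{})
    	if err != nil && !apierrors.IsAlreadyExists(err) {
    		return err
    	}
    	return nil
    }
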
}) /* @@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("Subpath", func() { Items: []v1.DownwardAPIVolumeFile{{Path: "downward/podname", FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}}, }, }, privilegedSecurityContext) - testsuites.TestBasicSubpath(f, pod.Name, pod) + testsuites.TestBasicSubpath(ctx, f, pod.Name, pod) }) /* @@ -114,7 +114,7 @@ var _ = utils.SIGDescribe("Subpath", func() { }, }, }, privilegedSecurityContext) - testsuites.TestBasicSubpath(f, "configmap-value", pod) + testsuites.TestBasicSubpath(ctx, f, "configmap-value", pod) }) }) @@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("Subpath", func() { ginkgo.It("should verify that container can restart successfully after configmaps modified", func(ctx context.Context) { configmapToModify := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap-to-modify"}, Data: map[string]string{"configmap-key": "configmap-value"}} configmapModified := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap-to-modify"}, Data: map[string]string{"configmap-key": "configmap-modified-value"}} - testsuites.TestPodContainerRestartWithConfigmapModified(f, configmapToModify, configmapModified) + testsuites.TestPodContainerRestartWithConfigmapModified(ctx, f, configmapToModify, configmapModified) }) }) }) diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index 781b3570cf1..453196b4a44 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -108,14 +108,14 @@ func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) op return totOps } -func getVolumeOpCounts(c clientset.Interface, config *rest.Config, pluginName string) opCounts { +func getVolumeOpCounts(ctx context.Context, c clientset.Interface, config *rest.Config, pluginName string) opCounts { if !framework.ProviderIs("gce", "gke", "aws") { return opCounts{} } nodeLimit := 25 - metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, config, true, false, true, false, false, false) + metricsGrabber, err := e2emetrics.NewMetricsGrabber(ctx, c, nil, config, true, false, true, false, false, false) if err != nil { framework.ExpectNoError(err, "Error creating metrics grabber: %v", err) @@ -126,19 +126,19 @@ func getVolumeOpCounts(c clientset.Interface, config *rest.Config, pluginName st return opCounts{} } - controllerMetrics, err := metricsGrabber.GrabFromControllerManager() + controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) framework.ExpectNoError(err, "Error getting c-m metrics : %v", err) totOps := getVolumeOpsFromMetricsForPlugin(testutil.Metrics(controllerMetrics), pluginName) framework.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server") - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Error listing nodes: %v", err) if len(nodes.Items) <= nodeLimit { // For large clusters with > nodeLimit nodes it is too time consuming to // gather metrics from all nodes. 
We just ignore the node metrics // for those clusters for _, node := range nodes.Items { - nodeMetrics, err := metricsGrabber.GrabFromKubelet(node.GetName()) + nodeMetrics, err := metricsGrabber.GrabFromKubelet(ctx, node.GetName()) framework.ExpectNoError(err, "Error getting Kubelet %v metrics: %v", node.GetName(), err) totOps = addOpCounts(totOps, getVolumeOpsFromMetricsForPlugin(testutil.Metrics(nodeMetrics), pluginName)) } @@ -164,7 +164,7 @@ func addOpCounts(o1 opCounts, o2 opCounts) opCounts { return totOps } -func getMigrationVolumeOpCounts(cs clientset.Interface, config *rest.Config, pluginName string) (opCounts, opCounts) { +func getMigrationVolumeOpCounts(ctx context.Context, cs clientset.Interface, config *rest.Config, pluginName string) (opCounts, opCounts) { if len(pluginName) > 0 { var migratedOps opCounts l := csitrans.New() @@ -174,16 +174,16 @@ func getMigrationVolumeOpCounts(cs clientset.Interface, config *rest.Config, plu migratedOps = opCounts{} } else { csiName = "kubernetes.io/csi:" + csiName - migratedOps = getVolumeOpCounts(cs, config, csiName) + migratedOps = getVolumeOpCounts(ctx, cs, config, csiName) } - return getVolumeOpCounts(cs, config, pluginName), migratedOps + return getVolumeOpCounts(ctx, cs, config, pluginName), migratedOps } // Not an in-tree driver framework.Logf("Test running for native CSI Driver, not checking metrics") return opCounts{}, opCounts{} } -func newMigrationOpCheck(cs clientset.Interface, config *rest.Config, pluginName string) *migrationOpCheck { +func newMigrationOpCheck(ctx context.Context, cs clientset.Interface, config *rest.Config, pluginName string) *migrationOpCheck { moc := migrationOpCheck{ cs: cs, config: config, @@ -223,16 +223,16 @@ func newMigrationOpCheck(cs clientset.Interface, config *rest.Config, pluginName return &moc } - moc.oldInTreeOps, moc.oldMigratedOps = getMigrationVolumeOpCounts(cs, config, pluginName) + moc.oldInTreeOps, moc.oldMigratedOps = getMigrationVolumeOpCounts(ctx, cs, config, pluginName) return &moc } -func (moc *migrationOpCheck) validateMigrationVolumeOpCounts() { +func (moc *migrationOpCheck) validateMigrationVolumeOpCounts(ctx context.Context) { if moc.skipCheck { return } - newInTreeOps, _ := getMigrationVolumeOpCounts(moc.cs, moc.config, moc.pluginName) + newInTreeOps, _ := getMigrationVolumeOpCounts(ctx, moc.cs, moc.config, moc.pluginName) for op, count := range newInTreeOps { if count != moc.oldInTreeOps[op] { diff --git a/test/e2e/storage/testsuites/capacity.go b/test/e2e/storage/testsuites/capacity.go index 70fb4449460..74c79030552 100644 --- a/test/e2e/storage/testsuites/capacity.go +++ b/test/e2e/storage/testsuites/capacity.go @@ -90,24 +90,24 @@ func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, patt f := framework.NewFrameworkWithCustomTimeouts("capacity", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { dDriver, _ = driver.(storageframework.DynamicPVTestDriver) // Now do the more expensive test initialization. 
- config := driver.PrepareTest(f) - sc = dDriver.GetDynamicProvisionStorageClass(config, pattern.FsType) + config := driver.PrepareTest(ctx, f) + sc = dDriver.GetDynamicProvisionStorageClass(ctx, config, pattern.FsType) if sc == nil { e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) } } ginkgo.It("provides storage capacity information", func(ctx context.Context) { - init() + init(ctx) timeout := time.Minute pollInterval := time.Second matchSC := HaveCapacitiesForClass(sc.Name) - listAll := gomega.Eventually(func() (*storagev1.CSIStorageCapacityList, error) { - return f.ClientSet.StorageV1().CSIStorageCapacities("").List(context.Background(), metav1.ListOptions{}) + listAll := gomega.Eventually(ctx, func() (*storagev1.CSIStorageCapacityList, error) { + return f.ClientSet.StorageV1().CSIStorageCapacities("").List(ctx, metav1.ListOptions{}) }, timeout, pollInterval) // If we have further information about what storage @@ -127,7 +127,7 @@ func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, patt // drivers with multiple keys might be // possible, too, but is not currently // implemented. - matcher = HaveCapacitiesForClassAndNodes(f.ClientSet, sc.Provisioner, sc.Name, dInfo.TopologyKeys[0]) + matcher = HaveCapacitiesForClassAndNodes(ctx, f.ClientSet, sc.Provisioner, sc.Name, dInfo.TopologyKeys[0]) } // Create storage class and wait for capacity information. @@ -239,8 +239,9 @@ func (h *haveCSIStorageCapacities) NegatedFailureMessage(actual interface{}) (me // HaveCapacitiesForClassAndNodes matches objects by storage class name. It finds // all nodes on which the driver runs and expects one object per node. -func HaveCapacitiesForClassAndNodes(client kubernetes.Interface, driverName, scName, topologyKey string) CapacityMatcher { +func HaveCapacitiesForClassAndNodes(ctx context.Context, client kubernetes.Interface, driverName, scName, topologyKey string) CapacityMatcher { return &haveLocalStorageCapacities{ + ctx: ctx, client: client, driverName: driverName, match: HaveCapacitiesForClass(scName), @@ -249,6 +250,7 @@ func HaveCapacitiesForClassAndNodes(client kubernetes.Interface, driverName, scN } type haveLocalStorageCapacities struct { + ctx context.Context client kubernetes.Interface driverName string match CapacityMatcher @@ -263,6 +265,7 @@ type haveLocalStorageCapacities struct { var _ CapacityMatcher = &haveLocalStorageCapacities{} func (h *haveLocalStorageCapacities) Match(actual interface{}) (success bool, err error) { + ctx := h.ctx h.expectedCapacities = nil h.unexpectedCapacities = nil h.missingTopologyValues = nil @@ -275,7 +278,7 @@ func (h *haveLocalStorageCapacities) Match(actual interface{}) (success bool, er } // Find all nodes on which the driver runs. 
- csiNodes, err := h.client.StorageV1().CSINodes().List(context.Background(), metav1.ListOptions{}) + csiNodes, err := h.client.StorageV1().CSINodes().List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -285,7 +288,7 @@ func (h *haveLocalStorageCapacities) Match(actual interface{}) (success bool, er if driver.Name != h.driverName { continue } - node, err := h.client.CoreV1().Nodes().Get(context.Background(), csiNode.Name, metav1.GetOptions{}) + node, err := h.client.CoreV1().Nodes().Get(ctx, csiNode.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go index 72d2769b22d..d8168e12996 100644 --- a/test/e2e/storage/testsuites/disruptive.go +++ b/test/e2e/storage/testsuites/disruptive.go @@ -92,23 +92,25 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func(accessModes []v1.PersistentVolumeAccessMode) { + init := func(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode) { l = local{} l.ns = f.Namespace l.cs = f.ClientSet // Now do the more expensive test initialization. - l.config = driver.PrepareTest(f) + l.config = driver.PrepareTest(ctx, f) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange if accessModes == nil { l.resource = storageframework.CreateVolumeResource( + ctx, driver, l.config, pattern, testVolumeSizeRange) } else { l.resource = storageframework.CreateVolumeResourceWithAccessModes( + ctx, driver, l.config, pattern, @@ -117,17 +119,17 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa } } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error if l.pod != nil { ginkgo.By("Deleting pod") - err := e2epod.DeletePodWithWait(f.ClientSet, l.pod) + err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod) errs = append(errs, err) l.pod = nil } if l.resource != nil { - err := l.resource.CleanupResource() + err := l.resource.CleanupResource(ctx) errs = append(errs, err) l.resource = nil } @@ -135,7 +137,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") } - type singlePodTestBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, mountPath string) + type singlePodTestBody func(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, mountPath string) type singlePodTest struct { testItStmt string runTestFile singlePodTestBody @@ -164,7 +166,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa if (pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil) || (pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) { ginkgo.It(t.testItStmt, func(ctx context.Context) { - init(nil) + init(ctx, nil) ginkgo.DeferCleanup(cleanup) var err error @@ -184,20 +186,20 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa NodeSelection: l.config.ClientNodeSelection, ImageID: e2epod.GetDefaultTestImageID(), } - l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, f.Timeouts.PodStart) + l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, l.cs, &podConfig, f.Timeouts.PodStart) framework.ExpectNoError(err, "While creating 
pods for kubelet restart test") if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil { - t.runTestBlock(l.cs, l.config.Framework, l.pod, e2epod.VolumeMountPath1) + t.runTestBlock(ctx, l.cs, l.config.Framework, l.pod, e2epod.VolumeMountPath1) } if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil { - t.runTestFile(l.cs, l.config.Framework, l.pod, e2epod.VolumeMountPath1) + t.runTestFile(ctx, l.cs, l.config.Framework, l.pod, e2epod.VolumeMountPath1) } }) } }(test) } - type multiplePodTestBody func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) + type multiplePodTestBody func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) type multiplePodTest struct { testItStmt string changeSELinuxContexts bool @@ -206,28 +208,28 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa multiplePodTests := []multiplePodTest{ { testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux][Feature:SELinuxMountReadWriteOncePod].", - runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { - storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1) + runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { + storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1) }, }, { testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux][Feature:SELinuxMountReadWriteOncePod].", - runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { - storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1) + runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { + storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1) }, }, { testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux][Feature:SELinuxMountReadWriteOncePod].", changeSELinuxContexts: true, - runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { - storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1) + runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { + storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1) }, }, { testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux][Feature:SELinuxMountReadWriteOncePod].", changeSELinuxContexts: true, - runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { - storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1) + runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { + 
storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1) }, }, } @@ -236,7 +238,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa func(t multiplePodTest) { if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil { ginkgo.It(t.testItStmt, func(ctx context.Context) { - init([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}) + init(ctx, []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}) ginkgo.DeferCleanup(cleanup) var err error @@ -256,7 +258,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa NodeSelection: l.config.ClientNodeSelection, ImageID: e2epod.GetDefaultTestImageID(), } - l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, f.Timeouts.PodStart) + l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, l.cs, &podConfig, f.Timeouts.PodStart) framework.ExpectNoError(err, "While creating pods for kubelet restart test") if t.changeSELinuxContexts { // Different than e2epv.SELinuxLabel @@ -267,7 +269,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa pod2.Spec.NodeName = l.pod.Spec.NodeName framework.ExpectNoError(err, "While creating second pod for kubelet restart test") if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil { - t.runTestFile(l.cs, l.config.Framework, l.pod, pod2) + t.runTestFile(ctx, l.cs, l.config.Framework, l.pod, pod2) } }) } diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index 5e612e8f723..f9e7a69113e 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ -140,8 +140,8 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat } // Now do the more expensive test initialization. 
- l.config = driver.PrepareTest(f) - l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, e2evolume.SizeRange{}) + l.config = driver.PrepareTest(ctx, f) + l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, e2evolume.SizeRange{}) switch pattern.VolType { case storageframework.CSIInlineVolume: @@ -166,9 +166,9 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat } } - cleanup := func() { + cleanup := func(ctx context.Context) { var cleanUpErrs []error - cleanUpErrs = append(cleanUpErrs, l.resource.CleanupResource()) + cleanUpErrs = append(cleanUpErrs, l.resource.CleanupResource(ctx)) err := utilerrors.NewAggregate(cleanUpErrs) framework.ExpectNoError(err, "while cleaning up") } @@ -182,7 +182,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat ginkgo.DeferCleanup(cleanup) l.testCase.ReadOnly = true - l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { + l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} { command := "mount | grep /mnt/test | grep ro," if framework.NodeOSDistroIs("windows") { // attempt to create a dummy file and expect for it not to be created @@ -199,7 +199,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat ginkgo.DeferCleanup(cleanup) l.testCase.ReadOnly = false - l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { + l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} { command := "mount | grep /mnt/test | grep rw," if framework.NodeOSDistroIs("windows") { // attempt to create a dummy file and expect for it to be created @@ -227,7 +227,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat } l.testCase.ReadOnly = false - l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { + l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} { podName := pod.Name framework.Logf("Running volume expansion checks %s", podName) @@ -249,7 +249,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat newSize.Add(resource.MustParse("1Gi")) framework.Logf("currentPvcSize %s, requested new size %s", currentPvcSize.String(), newSize.String()) - newPVC, err := ExpandPVCSize(pvc, newSize, f.ClientSet) + newPVC, err := ExpandPVCSize(ctx, pvc, newSize, f.ClientSet) framework.ExpectNoError(err, "While updating pvc for more size") pvc = newPVC gomega.Expect(pvc).NotTo(gomega.BeNil()) @@ -260,11 +260,11 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat } ginkgo.By("Waiting for cloudprovider resize to finish") - err = WaitForControllerVolumeResize(pvc, f.ClientSet, totalResizeWaitPeriod) + err = WaitForControllerVolumeResize(ctx, pvc, f.ClientSet, totalResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for pvc resize to finish") ginkgo.By("Waiting for file system resize to finish") - pvc, err = WaitForFSResize(pvc, f.ClientSet) + pvc, err = WaitForFSResize(ctx, pvc, f.ClientSet) framework.ExpectNoError(err, "while waiting for fs resize to finish") pvcConditions := pvc.Status.Conditions @@ -287,13 +287,13 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat _, shared, readOnly = eDriver.GetVolume(l.config, 0) } - l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { + l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} { // Create another pod with the same inline volume 
attributes. pod2 := StartInPodWithInlineVolume(ctx, f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000", []v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource}, readOnly, l.testCase.Node) - framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod2.Name, pod2.Namespace, f.Timeouts.PodStartSlow), "waiting for second pod with inline volume") + framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod2.Name, pod2.Namespace, f.Timeouts.PodStartSlow), "waiting for second pod with inline volume") // If (and only if) we were able to mount // read/write and volume data is not shared @@ -306,7 +306,11 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]") } - defer StopPodAndDependents(ctx, f.ClientSet, f.Timeouts, pod2) + // TestEphemeral expects the pod to be fully deleted + // when this function returns, so don't delay this + // cleanup. + StopPodAndDependents(ctx, f.ClientSet, f.Timeouts, pod2) + return nil } @@ -353,7 +357,7 @@ type EphemeralTest struct { // RunningPodCheck is invoked while a pod using an inline volume is running. // It can execute additional checks on the pod and its volume(s). Any data // returned by it is passed to StoppedPodCheck. - RunningPodCheck func(pod *v1.Pod) interface{} + RunningPodCheck func(ctx context.Context, pod *v1.Pod) interface{} // StoppedPodCheck is invoked after ensuring that the pod is gone. // It is passed the data gather by RunningPodCheck or nil if that @@ -361,7 +365,7 @@ type EphemeralTest struct { // like for example verifying that the ephemeral volume was really // removed. How to do such a check is driver-specific and not // covered by the generic storage test suite. - StoppedPodCheck func(nodeName string, runningPodData interface{}) + StoppedPodCheck func(ctx context.Context, nodeName string, runningPodData interface{}) // NumInlineVolumes sets the number of ephemeral inline volumes per pod. // Unset (= zero) is the same as one. @@ -410,7 +414,7 @@ func (t EphemeralTest) TestEphemeral(ctx context.Context) { // pod might be nil now. StopPodAndDependents(ctx, client, t.Timeouts, pod) }() - framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, pod.Namespace, t.Timeouts.PodStartSlow), "waiting for pod with inline volume") + framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, pod.Namespace, t.Timeouts.PodStartSlow), "waiting for pod with inline volume") runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName @@ -418,7 +422,7 @@ func (t EphemeralTest) TestEphemeral(ctx context.Context) { // Run the checker of the running pod. 
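// A sketch of how a caller can use the new callback signatures on EphemeralTest, assuming
// l.testCase and f from the surrounding suite; the check bodies here are placeholders.
// Passing ctx into the callbacks lets any API calls they make be cancelled on abort.
//
//	l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} {
//		running, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
//		framework.ExpectNoError(err, "get running pod")
//		return running.Spec.NodeName
//	}
//	l.testCase.StoppedPodCheck = func(ctx context.Context, nodeName string, runningPodData interface{}) {
//		// runningPodData is whatever RunningPodCheck returned, here the node name.
//		framework.Logf("pod ran on node %q (RunningPodCheck reported %v)", nodeName, runningPodData)
//	}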
var runningPodData interface{} if t.RunningPodCheck != nil { - runningPodData = t.RunningPodCheck(pod) + runningPodData = t.RunningPodCheck(ctx, pod) } StopPodAndDependents(ctx, client, t.Timeouts, pod) @@ -431,7 +435,7 @@ func (t EphemeralTest) TestEphemeral(ctx context.Context) { gomega.Expect(pvcs.Items).Should(gomega.BeEmpty(), "no dangling PVCs") if t.StoppedPodCheck != nil { - t.StoppedPodCheck(actualNodeName, runningPodData) + t.StoppedPodCheck(ctx, actualNodeName, runningPodData) } } diff --git a/test/e2e/storage/testsuites/fsgroupchangepolicy.go b/test/e2e/storage/testsuites/fsgroupchangepolicy.go index bed5b98b71f..72e621f378b 100644 --- a/test/e2e/storage/testsuites/fsgroupchangepolicy.go +++ b/test/e2e/storage/testsuites/fsgroupchangepolicy.go @@ -108,19 +108,19 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { e2eskipper.SkipIfNodeOSDistroIs("windows") l = local{} l.driver = driver - l.config = driver.PrepareTest(f) + l.config = driver.PrepareTest(ctx, f) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange - l.resource = storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(ctx, l.driver, l.config, pattern, testVolumeSizeRange) } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error if l.resource != nil { - if err := l.resource.CleanupResource(); err != nil { + if err := l.resource.CleanupResource(ctx); err != nil { errs = append(errs, err) } l.resource = nil @@ -217,7 +217,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD e2eskipper.Skipf("Driver %q supports VolumeMountGroup, which is incompatible with this test - skipping", dInfo.Name) } - init() + init(ctx) ginkgo.DeferCleanup(cleanup) podConfig := e2epod.Config{ NS: f.Namespace.Name, @@ -227,7 +227,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD PodFSGroupChangePolicy: &policy, } // Create initial pod and create files in root and sub-directory and verify ownership. - pod := createPodAndVerifyContentGid(l.config.Framework, &podConfig, true /* createInitialFiles */, "" /* expectedRootDirFileOwnership */, "" /* expectedSubDirFileOwnership */) + pod := createPodAndVerifyContentGid(ctx, l.config.Framework, &podConfig, true /* createInitialFiles */, "" /* expectedRootDirFileOwnership */, "" /* expectedSubDirFileOwnership */) // Change the ownership of files in the initial pod. if test.changedRootDirFileOwnership != 0 { @@ -241,21 +241,21 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD } ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod.Namespace, pod.Name)) - framework.ExpectNoError(e2epod.DeletePodWithWait(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)) // Create a second pod with existing volume and verify the contents ownership. 
podConfig.FsGroup = utilpointer.Int64Ptr(int64(test.secondPodFsGroup)) - pod = createPodAndVerifyContentGid(l.config.Framework, &podConfig, false /* createInitialFiles */, strconv.Itoa(test.finalExpectedRootDirFileOwnership), strconv.Itoa(test.finalExpectedSubDirFileOwnership)) + pod = createPodAndVerifyContentGid(ctx, l.config.Framework, &podConfig, false /* createInitialFiles */, strconv.Itoa(test.finalExpectedRootDirFileOwnership), strconv.Itoa(test.finalExpectedSubDirFileOwnership)) ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod.Namespace, pod.Name)) - framework.ExpectNoError(e2epod.DeletePodWithWait(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)) }) } } -func createPodAndVerifyContentGid(f *framework.Framework, podConfig *e2epod.Config, createInitialFiles bool, expectedRootDirFileOwnership, expectedSubDirFileOwnership string) *v1.Pod { +func createPodAndVerifyContentGid(ctx context.Context, f *framework.Framework, podConfig *e2epod.Config, createInitialFiles bool, expectedRootDirFileOwnership, expectedSubDirFileOwnership string) *v1.Pod { podFsGroup := strconv.FormatInt(*podConfig.FsGroup, 10) ginkgo.By(fmt.Sprintf("Creating Pod in namespace %s with fsgroup %s", podConfig.NS, podFsGroup)) - pod, err := e2epod.CreateSecPodWithNodeSelection(f.ClientSet, podConfig, f.Timeouts.PodStart) + pod, err := e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, podConfig, f.Timeouts.PodStart) framework.ExpectNoError(err) framework.Logf("Pod %s/%s started successfully", pod.Namespace, pod.Name) diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index 7494387f7c7..eef3d31a549 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -106,25 +106,25 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p f := framework.NewFrameworkWithCustomTimeouts("multivolume", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { l = local{} l.ns = f.Namespace l.cs = f.ClientSet l.driver = driver // Now do the more expensive test initialization. 
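// The init/cleanup shape these suites converge on after the conversion, shown here as a
// condensed sketch inside DefineTests (l, driver, pattern, f and the apimachinery errors
// package are assumed to be in scope, as in the file above): both closures take a context,
// and cleanup is handed to ginkgo.DeferCleanup so it runs with Ginkgo's cleanup context.
//
//	init := func(ctx context.Context) {
//		l.config = driver.PrepareTest(ctx, f)
//		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
//		l.resources = append(l.resources,
//			storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange))
//	}
//	cleanup := func(ctx context.Context) {
//		var errs []error
//		for _, resource := range l.resources {
//			errs = append(errs, resource.CleanupResource(ctx))
//		}
//		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resources")
//	}
//	// inside a spec: init(ctx); ginkgo.DeferCleanup(cleanup)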
- l.config = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) + l.config = driver.PrepareTest(ctx, f) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error for _, resource := range l.resources { - errs = append(errs, resource.CleanupResource()) + errs = append(errs, resource.CleanupResource(ctx)) } framework.ExpectNoError(errors.NewAggregate(errs), "while cleanup resource") - l.migrationCheck.validateMigrationVolumeOpCounts() + l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } // This tests below configuration: @@ -140,7 +140,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping") } - init() + init(ctx) ginkgo.DeferCleanup(cleanup) var pvcs []*v1.PersistentVolumeClaim @@ -148,12 +148,11 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p for i := 0; i < numVols; i++ { testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) pvcs = append(pvcs, resource.Pvc) } - - TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name, + TestAccessMultipleVolumesAcrossPodRecreation(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, true /* sameNode */) }) @@ -170,7 +169,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping") } - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Check different-node test requirement @@ -180,7 +179,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p if l.config.ClientNodeSelection.Name != "" { e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name) } - if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil { + if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil { framework.Failf("Error setting topology requirements: %v", err) } @@ -189,12 +188,12 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p for i := 0; i < numVols; i++ { testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) pvcs = append(pvcs, resource.Pvc) } - TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name, + TestAccessMultipleVolumesAcrossPodRecreation(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, false /* sameNode */) }) @@ -215,7 +214,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping") } - init() + init(ctx) ginkgo.DeferCleanup(cleanup) var pvcs []*v1.PersistentVolumeClaim @@ -228,12 +227,12 @@ func (t 
*multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p curPattern.VolMode = v1.PersistentVolumeFilesystem } testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, driver, l.config, curPattern, testVolumeSizeRange) l.resources = append(l.resources, resource) pvcs = append(pvcs, resource.Pvc) } - TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name, + TestAccessMultipleVolumesAcrossPodRecreation(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, true /* sameNode */) }) @@ -254,7 +253,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping") } - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Check different-node test requirement @@ -264,7 +263,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p if l.config.ClientNodeSelection.Name != "" { e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name) } - if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil { + if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil { framework.Failf("Error setting topology requirements: %v", err) } @@ -278,12 +277,12 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p curPattern.VolMode = v1.PersistentVolumeFilesystem } testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, driver, l.config, curPattern, testVolumeSizeRange) l.resources = append(l.resources, resource) pvcs = append(pvcs, resource.Pvc) } - TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name, + TestAccessMultipleVolumesAcrossPodRecreation(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, false /* sameNode */) }) @@ -293,7 +292,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // \ / <- same volume mode // [volume1] ginkgo.It("should concurrently access the single volume from pods on the same node", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) numPods := 2 @@ -304,11 +303,11 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // Create volume testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, l.driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) // Test access to the volume from pods on different node - TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name, + TestConcurrentAccessToSingleVolume(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, false /* readOnly */) }) @@ -318,7 +317,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // | | <- same volume mode // [volume1] -> [restored volume1 snapshot] ginkgo.It("should concurrently access the volume and 
restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) if !l.driver.GetDriverInfo().Capabilities[storageframework.CapSnapshotDataSource] { @@ -330,7 +329,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // Create a volume testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, l.driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) pvcs := []*v1.PersistentVolumeClaim{resource.Pvc} @@ -355,13 +354,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p pvc2.Spec.VolumeName = "" pvc2.Spec.DataSourceRef = dataSourceRef - pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{}) + pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(ctx, pvc2, metav1.CreateOptions{}) framework.ExpectNoError(err) pvcs = append(pvcs, pvc2) ginkgo.DeferCleanup(framework.IgnoreNotFound(l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete), pvc2.Name, metav1.DeleteOptions{}) // Test access to both volumes on the same node. - TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent) + TestConcurrentAccessToRelatedVolumes(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent) }) // This tests below configuration: @@ -370,7 +369,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // | | <- same volume mode // [volume1] -> [cloned volume1] ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) if !l.driver.GetDriverInfo().Capabilities[storageframework.CapPVCDataSource] { @@ -380,7 +379,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // Create a volume expectedContent := fmt.Sprintf("volume content %d", time.Now().UTC().UnixNano()) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, l.driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) pvcs := []*v1.PersistentVolumeClaim{resource.Pvc} testConfig := storageframework.ConvertTestConfig(l.config) @@ -397,13 +396,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p pvc2.Spec.VolumeName = "" pvc2.Spec.DataSourceRef = dataSourceRef - pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{}) + pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(ctx, pvc2, metav1.CreateOptions{}) framework.ExpectNoError(err) pvcs = append(pvcs, pvc2) ginkgo.DeferCleanup(framework.IgnoreNotFound(l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete), pvc2.Name, metav1.DeleteOptions{}) // Test access to both volumes on the same node. 
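// The same two-step pattern, condensed: client-go calls inside a spec take the spec's ctx
// rather than context.TODO(), and deletion is registered through DeferCleanup wrapped in
// framework.IgnoreNotFound so an already-removed object does not fail cleanup. This mirrors
// the hunk above; pvc2 and l.cs come from the surrounding test.
//
//	pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(ctx, pvc2, metav1.CreateOptions{})
//	framework.ExpectNoError(err, "create PVC from data source")
//	ginkgo.DeferCleanup(framework.IgnoreNotFound(l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete),
//		pvc2.Name, metav1.DeleteOptions{})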
- TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent) + TestConcurrentAccessToRelatedVolumes(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent) }) // This tests below configuration: @@ -412,7 +411,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // \ / <- same volume mode (read only) // [volume1] ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) numPods := 2 @@ -427,14 +426,14 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // Create volume testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, l.driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) // Initialize the volume with a filesystem - it's going to be mounted as read-only below. - initializeVolume(l.cs, f.Timeouts, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection) + initializeVolume(ctx, l.cs, f.Timeouts, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection) // Test access to the volume from pods on a single node - TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name, + TestConcurrentAccessToSingleVolume(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, true /* readOnly */) }) @@ -444,7 +443,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // \ / <- same volume mode // [volume1] ginkgo.It("should concurrently access the single volume from pods on different node", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) numPods := 2 @@ -458,24 +457,24 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name) } // For multi-node tests there must be enough nodes with the same toopology to schedule the pods - if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil { + if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil { framework.Failf("Error setting topology requirements: %v", err) } // Create volume testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(ctx, l.driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) // Test access to the volume from pods on different node - TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name, + TestConcurrentAccessToSingleVolume(ctx, l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */, false /* readOnly */) }) } // testAccessMultipleVolumes tests access to multiple volumes from single pod on the specified node // If readSeedBase > 0, read test are done before write/read test assuming that there is already data written. 
-func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string, +func testAccessMultipleVolumes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string, node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string { ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node)) podConfig := e2epod.Config{ @@ -485,9 +484,9 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n NodeSelection: node, ImageID: e2epod.GetDefaultTestImageID(), } - pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart) + pod, err := e2epod.CreateSecPodWithNodeSelection(ctx, cs, &podConfig, f.Timeouts.PodStart) defer func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, pod)) }() framework.ExpectNoError(err) @@ -511,7 +510,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i)) } - pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") return pod.Spec.NodeName } @@ -519,14 +518,14 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n // TestAccessMultipleVolumesAcrossPodRecreation tests access to multiple volumes from single pod, // then recreate pod on the same or different node depending on requiresSameNode, // and recheck access to the volumes from the recreated pod -func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs clientset.Interface, ns string, +func TestAccessMultipleVolumesAcrossPodRecreation(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string, node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool) { // No data is written in volume, so passing negative value readSeedBase := int64(-1) writeSeedBase := time.Now().UTC().UnixNano() // Test access to multiple volumes on the specified node - nodeName := testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase) + nodeName := testAccessMultipleVolumes(ctx, f, cs, ns, node, pvcs, readSeedBase, writeSeedBase) // Set affinity depending on requiresSameNode if requiresSameNode { @@ -540,14 +539,14 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli readSeedBase = writeSeedBase // Update writeSeed with new value writeSeedBase = time.Now().UTC().UnixNano() - _ = testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase) + _ = testAccessMultipleVolumes(ctx, f, cs, ns, node, pvcs, readSeedBase, writeSeedBase) } // TestConcurrentAccessToSingleVolume tests access to a single volume from multiple pods, // then delete the last pod, and recheck access to the volume after pod deletion to check if other // pod deletion doesn't affect. Pods are deployed on the same node or different nodes depending on requiresSameNode. // Read/write check are done across pod, by check reading both what pod{n-1} and pod{n} wrote from pod{n}. 
-func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string, +func TestConcurrentAccessToSingleVolume(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string, node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool, readOnly bool) { @@ -565,12 +564,15 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int PVCsReadOnly: readOnly, ImageID: e2epod.GetTestImageID(imageutils.JessieDnsutils), } - pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart) - defer func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod)) - }() + pod, err := e2epod.CreateSecPodWithNodeSelection(ctx, cs, &podConfig, f.Timeouts.PodStart) framework.ExpectNoError(err) - pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + // The pod must get deleted before this function returns because the caller may try to + // delete volumes as part of the tests. Keeping the pod running would block that. + // If the test times out, then the namespace deletion will take care of it. + defer func() { + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, pod)) + }() + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) pods = append(pods, pod) framework.ExpectNoError(err, fmt.Sprintf("get pod%d", index)) actualNodeName := pod.Spec.NodeName @@ -630,7 +632,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int } // Delete the last pod and remove from slice of pods lastPod := pods[len(pods)-1] - framework.ExpectNoError(e2epod.DeletePodWithWait(cs, lastPod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, lastPod)) pods = pods[:len(pods)-1] // Recheck if pv can be accessed from each pod after the last pod deletion @@ -667,7 +669,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int // TestConcurrentAccessToRelatedVolumes tests access to multiple volumes from multiple pods. // Each provided PVC is used by a single pod. The test ensures that volumes created from // another volume (=clone) or volume snapshot can be used together with the original volume. -func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.Interface, ns string, +func TestConcurrentAccessToRelatedVolumes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string, node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, expectedContent string) { var pods []*v1.Pod @@ -684,9 +686,9 @@ func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.I PVCsReadOnly: false, ImageID: e2epod.GetTestImageID(imageutils.JessieDnsutils), } - pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart) + pod, err := e2epod.CreateSecPodWithNodeSelection(ctx, cs, &podConfig, f.Timeouts.PodStart) defer func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, pod)) }() framework.ExpectNoError(err) pods = append(pods, pod) @@ -753,8 +755,8 @@ func getCurrentTopologiesNumber(cs clientset.Interface, nodes *v1.NodeList, keys // ensureTopologyRequirements check that there are enough nodes in the cluster for a test and // sets nodeSelection affinity according to given topology keys for drivers that provide them. 
-func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, cs clientset.Interface, driverInfo *storageframework.DriverInfo, minCount int) error { - nodes, err := e2enode.GetReadySchedulableNodes(cs) +func ensureTopologyRequirements(ctx context.Context, nodeSelection *e2epod.NodeSelection, cs clientset.Interface, driverInfo *storageframework.DriverInfo, minCount int) error { + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) if len(nodes.Items) < minCount { e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", minCount)) @@ -785,7 +787,7 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, cs clientse } // initializeVolume creates a filesystem on given volume, so it can be used as read-only later -func initializeVolume(cs clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { +func initializeVolume(ctx context.Context, cs clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock { // Block volumes do not need to be initialized. return @@ -801,9 +803,9 @@ func initializeVolume(cs clientset.Interface, t *framework.TimeoutContext, ns st NodeSelection: node, ImageID: e2epod.GetDefaultTestImageID(), } - pod, err := e2epod.CreateSecPod(cs, &podConfig, t.PodStart) + pod, err := e2epod.CreateSecPod(ctx, cs, &podConfig, t.PodStart) defer func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, pod)) }() framework.ExpectNoError(err) } diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index df5a15f7352..42aa0b5cc74 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -62,7 +62,7 @@ type StorageClassTest struct { DelayBinding bool ClaimSize string ExpectedSize string - PvCheck func(claim *v1.PersistentVolumeClaim) + PvCheck func(ctx context.Context, claim *v1.PersistentVolumeClaim) VolumeMode v1.PersistentVolumeMode AllowVolumeExpansion bool NodeSelection e2epod.NodeSelection @@ -136,12 +136,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { l = local{} dDriver, _ = driver.(storageframework.DynamicPVTestDriver) // Now do the more expensive test initialization. 
- l.config = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) + l.config = driver.PrepareTest(ctx, f) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) ginkgo.DeferCleanup(l.migrationCheck.validateMigrationVolumeOpCounts) l.cs = l.config.Framework.ClientSet testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange @@ -149,7 +149,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) - l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType) + l.sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, pattern.FsType) if l.sc == nil { e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) } @@ -185,10 +185,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, e2eskipper.Skipf("Block volumes do not support mount options - skipping") } - init() + init(ctx) l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List() - l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { + l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) { PVWriteReadSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection) } SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class) @@ -209,7 +209,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name) } - init() + init(ctx) dc := l.config.Framework.DynamicClient testConfig := storageframework.ConvertTestConfig(l.config) @@ -217,7 +217,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent) l.pvc.Spec.DataSourceRef = dataSourceRef - l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { + l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) { ginkgo.By("checking whether the created volume has the pre-populated data") tests := []e2evolume.Test{ { @@ -227,7 +227,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, ExpectedContent: expectedContent, }, } - e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) + e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests) } l.testCase.TestDynamicProvisioning(ctx) }) @@ -240,10 +240,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, e2eskipper.Skipf("Test for Block volumes is not implemented - skipping") } - init() + init(ctx) ginkgo.By("Creating validator namespace") - valNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{ + valNamespace, err := f.CreateNamespace(ctx, fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{ "e2e-framework": f.BaseName, "e2e-test-namespace": f.Namespace.Name, }) @@ -256,14 +256,14 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, 
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/rbac-data-source-validator.yaml", "test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/setup-data-source-validator.yaml", } - err = storageutils.CreateFromManifests(f, valNamespace, + err = storageutils.CreateFromManifests(ctx, f, valNamespace, func(item interface{}) error { return nil }, valManifests...) framework.ExpectNoError(err) ginkgo.By("Creating populator namespace") - popNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{ + popNamespace, err := f.CreateNamespace(ctx, fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{ "e2e-framework": f.BaseName, "e2e-test-namespace": f.Namespace.Name, }) @@ -275,7 +275,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, "test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/hello-populator-crd.yaml", "test/e2e/testing-manifests/storage-csi/any-volume-datasource/hello-populator-deploy.yaml", } - err = storageutils.CreateFromManifests(f, popNamespace, + err = storageutils.CreateFromManifests(ctx, f, popNamespace, func(item interface{}) error { switch item := item.(type) { case *appsv1.Deployment: @@ -334,12 +334,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, }, } - _, err = dc.Resource(volumePopulatorGVR).Create(context.TODO(), helloPopulatorCR, metav1.CreateOptions{}) + _, err = dc.Resource(volumePopulatorGVR).Create(ctx, helloPopulatorCR, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting VolumePopulator CR datasource %q/%q", helloPopulatorCR.GetNamespace(), helloPopulatorCR.GetName()) - err = dc.Resource(volumePopulatorGVR).Delete(context.TODO(), helloPopulatorCR.GetName(), metav1.DeleteOptions{}) + err = dc.Resource(volumePopulatorGVR).Delete(ctx, helloPopulatorCR.GetName(), metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting VolumePopulator CR datasource %q. Error: %v", helloPopulatorCR.GetName(), err) } @@ -366,12 +366,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, }, } - _, err = dc.Resource(helloGVR).Namespace(f.Namespace.Name).Create(context.TODO(), helloCR, metav1.CreateOptions{}) + _, err = dc.Resource(helloGVR).Namespace(f.Namespace.Name).Create(ctx, helloCR, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting Hello CR datasource %q/%q", helloCR.GetNamespace(), helloCR.GetName()) - err = dc.Resource(helloGVR).Namespace(helloCR.GetNamespace()).Delete(context.TODO(), helloCR.GetName(), metav1.DeleteOptions{}) + err = dc.Resource(helloGVR).Namespace(helloCR.GetNamespace()).Delete(ctx, helloCR.GetName(), metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting Hello CR datasource %q. 
Error: %v", helloCR.GetName(), err) } @@ -386,7 +386,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, testConfig := storageframework.ConvertTestConfig(l.config) l.testCase.NodeSelection = testConfig.ClientNodeSelection - l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { + l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) { ginkgo.By("checking whether the created volume has the pre-populated data") tests := []e2evolume.Test{ { @@ -396,7 +396,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, ExpectedContent: expectedContent, }, } - e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) + e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests) } SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class) @@ -408,12 +408,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, if !dInfo.Capabilities[storageframework.CapPVCDataSource] { e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name) } - init() + init(ctx) if l.config.ClientNodeSelection.Name == "" { // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some // drivers don't support cloning across them. - if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil { + if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil { framework.Failf("Error setting topology requirements: %v", err) } } @@ -422,7 +422,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) l.pvc.Spec.DataSourceRef = dataSourceRef l.testCase.NodeSelection = testConfig.ClientNodeSelection - l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { + l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) { ginkgo.By("checking whether the created volume has the pre-populated data") tests := []e2evolume.Test{ { @@ -432,11 +432,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, ExpectedContent: expectedContent, }, } - e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) + e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests) } // Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning. - volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace) - e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision) + volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace) + e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision) l.testCase.TestDynamicProvisioning(ctx) }) @@ -449,12 +449,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, e2eskipper.Skipf("Driver %q does not support block volumes - skipping", dInfo.Name) } - init() + init(ctx) if l.config.ClientNodeSelection.Name == "" { // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some // drivers don't support cloning across them. 
- if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil { + if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil { framework.Failf("Error setting topology requirements: %v", err) } } @@ -476,7 +476,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, t := *l.testCase t.NodeSelection = testConfig.ClientNodeSelection - t.PvCheck = func(claim *v1.PersistentVolumeClaim) { + t.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) { ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i)) tests := []e2evolume.Test{ { @@ -486,11 +486,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, ExpectedContent: expectedContent, }, } - e2evolume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests) + e2evolume.TestVolumeClientSlow(ctx, f, myTestConfig, nil, "", tests) } // Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning. - volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace) - e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision) + volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace) + e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision) t.TestDynamicProvisioning(ctx) }(i) } @@ -509,9 +509,9 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, e2eskipper.Skipf("this driver does not support multiple PVs with the same volumeHandle") } - init() + init(ctx) - l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { + l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) { MultiplePVMountSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection) } SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class) @@ -554,7 +554,7 @@ func SetupStorageClass( } } else { // StorageClass is nil, so the default one will be used - scName, err := e2epv.GetDefaultStorageClassName(client) + scName, err := e2epv.GetDefaultStorageClassName(ctx, client) framework.ExpectNoError(err) ginkgo.By("Wanted storage class is nil, fetching default StorageClass=" + scName) computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) @@ -604,15 +604,15 @@ func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.Persi } var pod *v1.Pod - pod, err := e2epod.CreateSecPod(client, podConfig, t.Timeouts.DataSourceProvision) + pod, err := e2epod.CreateSecPod(ctx, client, podConfig, t.Timeouts.DataSourceProvision) // Delete pod now, otherwise PV can't be deleted below framework.ExpectNoError(err) - e2epod.DeletePodOrFail(client, pod.Namespace, pod.Name) + e2epod.DeletePodOrFail(ctx, client, pod.Namespace, pod.Name) } // Run the checker if t.PvCheck != nil { - t.PvCheck(claim) + t.PvCheck(ctx, claim) } pv := t.checkProvisioning(ctx, client, claim, class) @@ -629,7 +629,7 @@ func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.Persi // t.Timeouts.PVDeleteSlow) to recover from random cloud hiccups. 
if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete { ginkgo.By(fmt.Sprintf("deleting the claim's PV %q", pv.Name)) - framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, t.Timeouts.PVDeleteSlow)) + framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, 5*time.Second, t.Timeouts.PVDeleteSlow)) } return pv @@ -650,7 +650,7 @@ func getBoundPV(ctx context.Context, client clientset.Interface, pvc *v1.Persist // checkProvisioning verifies that the claim is bound and has the correct properties func (t StorageClassTest) checkProvisioning(ctx context.Context, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume { - err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) + err := e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) framework.ExpectNoError(err) ginkgo.By("checking the claim") @@ -721,7 +721,7 @@ func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface, // pod might be nil now. StopPod(ctx, client, pod) }) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName @@ -778,7 +778,7 @@ func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) command := "echo 'hello world' > /mnt/test/data" pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName @@ -791,7 +791,7 @@ func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode)) command = "grep 'hello world' /mnt/test/data" pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node") @@ -817,7 +817,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co defer func() { errors := map[string]error{} for _, claim := range createdClaims 
{ - err := e2epv.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace) + err := e2epv.DeletePersistentVolumeClaim(ctx, t.Client, claim.Name, claim.Namespace) if err != nil { errors[claim.Name] = err } @@ -831,7 +831,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co // Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out ginkgo.By("checking the claims are in pending state") - err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true) + err = e2epv.WaitForPersistentVolumeClaimsPhase(ctx, v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true) framework.ExpectError(err) verifyPVCsPending(ctx, t.Client, createdClaims) @@ -839,14 +839,14 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co // Create a pod referring to the claim and wait for it to get to running var pod *v1.Pod if expectUnschedulable { - pod, err = e2epod.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) + pod, err = e2epod.CreateUnschedulablePod(ctx, t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) } else { - pod, err = e2epod.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) + pod, err = e2epod.CreatePod(ctx, t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) } framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) error { - e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name) - return e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete) + e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name) + return e2epod.WaitForPodToDisappear(ctx, t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete) }) if expectUnschedulable { // Verify that no claims are provisioned. 
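// As in the hunk above, DeferCleanup also accepts a func(ctx context.Context) error:
// Ginkgo supplies the context when the cleanup runs and reports a non-nil error as a
// failure. Condensed sketch with client, pod and timeouts standing in for t.Client,
// the created pod and t.Timeouts.
//
//	ginkgo.DeferCleanup(func(ctx context.Context) error {
//		e2epod.DeletePodOrFail(ctx, client, pod.Namespace, pod.Name)
//		return e2epod.WaitForPodToDisappear(ctx, client, pod.Namespace, pod.Name,
//			labels.Everything(), framework.Poll, timeouts.PodDelete)
//	})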
@@ -865,7 +865,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // make sure claim did bind - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) framework.ExpectNoError(err) pv, err := t.Client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{}) @@ -881,7 +881,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod { pod := StartInPodWithVolume(ctx, c, ns, claimName, podName, command, node) defer StopPod(ctx, c, pod) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, pod.Namespace, t.PodStartSlow)) // get the latest status of the pod pod, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -954,7 +954,7 @@ func StopPod(ctx context.Context, c clientset.Interface, pod *v1.Pod) { } else { framework.Logf("Pod %s has the following logs: %s", pod.Name, body) } - e2epod.DeletePodWithWait(c, pod) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) } // StopPodAndDependents first tries to log the output of the pod's container, @@ -1008,13 +1008,13 @@ func StopPodAndDependents(ctx context.Context, c clientset.Interface, timeouts * framework.Logf("pod Delete API error: %v", err) } framework.Logf("Wait up to %v for pod %q to be fully deleted", timeouts.PodDelete, pod.Name) - e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, timeouts.PodDelete) + framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, c, pod.Name, pod.Namespace, timeouts.PodDelete)) if len(podPVs) > 0 { for _, pv := range podPVs { // As with CSI inline volumes, we use the pod delete timeout here because conceptually // the volume deletion needs to be that fast (whatever "that" is). 
framework.Logf("Wait up to %v for pod PV %s to be fully deleted", timeouts.PodDelete, pv.Name) - e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PodDelete) + framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 5*time.Second, timeouts.PodDelete)) } } } @@ -1065,10 +1065,10 @@ func prepareSnapshotDataSourceForProvisioning( ExpectedContent: injectContent, }, } - e2evolume.InjectContent(f, config, nil, "", tests) + e2evolume.InjectContent(ctx, f, config, nil, "", tests) parameters := map[string]string{} - snapshotResource := storageframework.CreateSnapshotResource(sDriver, perTestConfig, pattern, initClaim.GetName(), initClaim.GetNamespace(), f.Timeouts, parameters) + snapshotResource := storageframework.CreateSnapshotResource(ctx, sDriver, perTestConfig, pattern, initClaim.GetName(), initClaim.GetNamespace(), f.Timeouts, parameters) group := "snapshot.storage.k8s.io" dataSourceRef := &v1.TypedObjectReference{ APIGroup: &group, @@ -1083,7 +1083,7 @@ func prepareSnapshotDataSourceForProvisioning( framework.Failf("Error deleting initClaim %q. Error: %v", initClaim.Name, err) } - err = snapshotResource.CleanupResource(f.Timeouts) + err = snapshotResource.CleanupResource(ctx, f.Timeouts) framework.ExpectNoError(err) } ginkgo.DeferCleanup(cleanupFunc) @@ -1120,7 +1120,7 @@ func preparePVCDataSourceForProvisioning( ExpectedContent: injectContent, }, } - e2evolume.InjectContent(f, config, nil, "", tests) + e2evolume.InjectContent(ctx, f, config, nil, "", tests) dataSourceRef := &v1.TypedObjectReference{ Kind: "PersistentVolumeClaim", @@ -1151,11 +1151,11 @@ func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interf NodeSelection: node, PVCs: []*v1.PersistentVolumeClaim{claim}, } - pod1, err := e2epod.CreateSecPodWithNodeSelection(client, &pod1Config, timeouts.PodStart) + pod1, err := e2epod.CreateSecPodWithNodeSelection(ctx, client, &pod1Config, timeouts.PodStart) framework.ExpectNoError(err) defer func() { ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod1.Namespace, pod1.Name)) - framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod1)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod1)) }() ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName)) @@ -1178,7 +1178,7 @@ func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interf VolumeMode: e2evolume.Spec.VolumeMode, } - pv2, pvc2, err := e2epv.CreatePVCPV(client, timeouts, pv2Config, pvc2Config, claim.Namespace, true) + pv2, pvc2, err := e2epv.CreatePVCPV(ctx, client, timeouts, pv2Config, pvc2Config, claim.Namespace, true) framework.ExpectNoError(err, "PVC, PV creation failed") framework.Logf("Created PVC %s/%s and PV %s", pvc2.Namespace, pvc2.Name, pv2.Name) @@ -1187,16 +1187,16 @@ func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interf NodeSelection: e2epod.NodeSelection{Name: pod1.Spec.NodeName}, PVCs: []*v1.PersistentVolumeClaim{pvc2}, } - pod2, err := e2epod.CreateSecPodWithNodeSelection(client, &pod2Config, timeouts.PodStart) + pod2, err := e2epod.CreateSecPodWithNodeSelection(ctx, client, &pod2Config, timeouts.PodStart) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod2.Namespace, pod2.Name, pod2.Spec.NodeName)) ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod2.Namespace, pod2.Name)) - framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod2)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod2)) - 
err = e2epv.DeletePersistentVolumeClaim(client, pvc2.Name, pvc2.Namespace) + err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, pvc2.Namespace) framework.ExpectNoError(err, "Failed to delete PVC: %s/%s", pvc2.Namespace, pvc2.Name) - err = e2epv.DeletePersistentVolume(client, pv2.Name) + err = e2epv.DeletePersistentVolume(ctx, client, pv2.Name) framework.ExpectNoError(err, "Failed to delete PV: %s", pv2.Name) } diff --git a/test/e2e/storage/testsuites/readwriteoncepod.go b/test/e2e/storage/testsuites/readwriteoncepod.go index 9fb6c840650..90cf19d7668 100644 --- a/test/e2e/storage/testsuites/readwriteoncepod.go +++ b/test/e2e/storage/testsuites/readwriteoncepod.go @@ -92,39 +92,39 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv f := framework.NewFrameworkWithCustomTimeouts("read-write-once-pod", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { l = readWriteOncePodTest{} - l.config = driver.PrepareTest(f) + l.config = driver.PrepareTest(ctx, f) l.cs = f.ClientSet l.pods = []*v1.Pod{} - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driverInfo.InTreePluginName) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), driverInfo.InTreePluginName) } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error for _, pod := range l.pods { framework.Logf("Deleting pod %v", pod.Name) - err := e2epod.DeletePodWithWait(l.cs, pod) + err := e2epod.DeletePodWithWait(ctx, l.cs, pod) errs = append(errs, err) } framework.Logf("Deleting volume %s", l.volume.Pvc.GetName()) - err := l.volume.CleanupResource() + err := l.volume.CleanupResource(ctx) errs = append(errs, err) framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") - l.migrationCheck.validateMigrationVolumeOpCounts() + l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } - ginkgo.BeforeEach(func() { - init() + ginkgo.BeforeEach(func(ctx context.Context) { + init(ctx) ginkgo.DeferCleanup(cleanup) }) ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume", func(ctx context.Context) { // Create the ReadWriteOncePod PVC. accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod} - l.volume = storageframework.CreateVolumeResourceWithAccessModes(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes) + l.volume = storageframework.CreateVolumeResourceWithAccessModes(ctx, driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes) podConfig := e2epod.Config{ NS: f.Namespace.Name, @@ -135,32 +135,32 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv // Create the first pod, which will take ownership of the ReadWriteOncePod PVC. 
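The BeforeEach rewrite above leans on Ginkgo v2 passing a context into any node body that declares one, and ginkgo.DeferCleanup doing the same for registered cleanup callbacks. A minimal sketch of that wiring, assuming Ginkgo v2 and using placeholder names rather than this suite's own (imports: context, github.com/onsi/ginkgo/v2):

var _ = ginkgo.Describe("context plumbing", func() {
	init := func(ctx context.Context) {
		// create per-spec resources here, passing ctx to every API call
	}
	cleanup := func(ctx context.Context) {
		// delete them here; Ginkgo supplies ctx when the cleanup runs
	}

	ginkgo.BeforeEach(func(ctx context.Context) {
		init(ctx)
		ginkgo.DeferCleanup(cleanup)
	})

	ginkgo.It("uses the per-spec context", func(ctx context.Context) {
		// ctx is cancelled as soon as the spec is interrupted or times out
	})
})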
pod1, err := e2epod.MakeSecPod(&podConfig) framework.ExpectNoError(err, "failed to create spec for pod1") - _, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(context.TODO(), pod1, metav1.CreateOptions{}) + _, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(ctx, pod1, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod1") - err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "failed to wait for pod1 running status") l.pods = append(l.pods, pod1) // Create the second pod, which will fail scheduling because the ReadWriteOncePod PVC is already in use. pod2, err := e2epod.MakeSecPod(&podConfig) framework.ExpectNoError(err, "failed to create spec for pod2") - _, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(context.TODO(), pod2, metav1.CreateOptions{}) + _, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(ctx, pod2, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod2") - err = e2epod.WaitForPodNameUnschedulableInNamespace(l.cs, pod2.Name, pod2.Namespace) + err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, l.cs, pod2.Name, pod2.Namespace) framework.ExpectNoError(err, "failed to wait for pod2 unschedulable status") l.pods = append(l.pods, pod2) // Delete the first pod and observe the second pod can now start. - err = e2epod.DeletePodWithWait(l.cs, pod1) + err = e2epod.DeletePodWithWait(ctx, l.cs, pod1) framework.ExpectNoError(err, "failed to delete pod1") - err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "failed to wait for pod2 running status") }) ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume on the same node", func(ctx context.Context) { // Create the ReadWriteOncePod PVC. accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod} - l.volume = storageframework.CreateVolumeResourceWithAccessModes(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes) + l.volume = storageframework.CreateVolumeResourceWithAccessModes(ctx, driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes) podConfig := e2epod.Config{ NS: f.Namespace.Name, @@ -171,14 +171,14 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv // Create the first pod, which will take ownership of the ReadWriteOncePod PVC. pod1, err := e2epod.MakeSecPod(&podConfig) framework.ExpectNoError(err, "failed to create spec for pod1") - _, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(context.TODO(), pod1, metav1.CreateOptions{}) + _, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(ctx, pod1, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod1") - err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "failed to wait for pod1 running status") l.pods = append(l.pods, pod1) // Get the node name for the first pod now that it's running. 
- pod1, err = l.cs.CoreV1().Pods(pod1.Namespace).Get(context.TODO(), pod1.Name, metav1.GetOptions{}) + pod1, err = l.cs.CoreV1().Pods(pod1.Namespace).Get(ctx, pod1.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod1") nodeName := pod1.Spec.NodeName @@ -188,7 +188,7 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv // Set the node name to that of the first pod. // Node name is set to bypass scheduling, which would enforce the access mode otherwise. pod2.Spec.NodeName = nodeName - _, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(context.TODO(), pod2, metav1.CreateOptions{}) + _, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(ctx, pod2, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod2") l.pods = append(l.pods, pod2) @@ -200,18 +200,18 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv "reason": events.FailedMountVolume, }.AsSelector().String() msg := "volume uses the ReadWriteOncePod access mode and is already in use by another pod" - err = e2eevents.WaitTimeoutForEvent(l.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart) + err = e2eevents.WaitTimeoutForEvent(ctx, l.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart) framework.ExpectNoError(err, "failed to wait for FailedMount event for pod2") // Wait for the second pod to fail because it is stuck at container creating. reason := "ContainerCreating" - err = e2epod.WaitForPodContainerToFail(l.cs, pod2.Namespace, pod2.Name, 0, reason, f.Timeouts.PodStart) + err = e2epod.WaitForPodContainerToFail(ctx, l.cs, pod2.Namespace, pod2.Name, 0, reason, f.Timeouts.PodStart) framework.ExpectNoError(err, "failed to wait for pod2 container to fail") // Delete the first pod and observe the second pod can now start. - err = e2epod.DeletePodWithWait(l.cs, pod1) + err = e2epod.DeletePodWithWait(ctx, l.cs, pod1) framework.ExpectNoError(err, "failed to delete pod1") - err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "failed to wait for pod2 running status") }) } diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 38cedcd4d39..8c1cba0886d 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -132,9 +132,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, dc = f.DynamicClient // Now do the more expensive test initialization. - config = driver.PrepareTest(f) + config = driver.PrepareTest(ctx, f) - volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange) + volumeResource = storageframework.CreateVolumeResource(ctx, dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange) ginkgo.DeferCleanup(volumeResource.CleanupResource) ginkgo.By("[init] starting a pod to use the claim") @@ -168,8 +168,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // guaranteed, this flavor of the test doesn't // check the content of the snapshot. 
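Swapping context.TODO() for ctx in the client-go calls above matters because a TODO context is never cancelled: requests, and the polls built on them, keep running after the spec has been interrupted, whereas the per-spec ctx propagates the cancellation immediately. In miniature (cs, ns and name are placeholders):

// Before: the request can outlive an aborted spec.
pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})

// After: the request is cancelled together with the spec.
pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})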
- framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow)) - pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow)) + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "check pod after it terminated") // Get new copy of the claim @@ -178,25 +178,25 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, pvcNamespace := pod.Namespace parameters := map[string]string{} - sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters) + sr := storageframework.CreateSnapshotResource(ctx, sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters) ginkgo.DeferCleanup(sr.CleanupResource, f.Timeouts) - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision) framework.ExpectNoError(err) - pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(ctx, pvcName, metav1.GetOptions{}) framework.ExpectNoError(err, "get PVC") claimSize = pvc.Spec.Resources.Requests.Storage().String() sc = volumeResource.Sc // Get the bound PV ginkgo.By("[init] checking the PV") - _, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + _, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) vs := sr.Vs // get the snapshot and check SnapshotContent properties - vscontent := checkSnapshot(dc, sr, pattern) + vscontent := checkSnapshot(ctx, dc, sr, pattern) var restoredPVC *v1.PersistentVolumeClaim var restoredPod *v1.Pod @@ -227,7 +227,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, restoredPod = StartInPodWithVolumeSource(ctx, cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, restoredPod) - framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) if pattern.VolType != storageframework.GenericEphemeralVolume { commands := e2evolume.GenerateReadFileCmd(datapath) _, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute) @@ -238,13 +238,12 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // Delete both Snapshot and restored Pod/PVC at the same time because different storage systems // have different ordering of deletion. Some may require delete the restored PVC first before // Snapshot deletion and some are opposite. 
- err = storageutils.DeleteSnapshotWithoutWaiting(dc, vs.GetNamespace(), vs.GetName()) + err = storageutils.DeleteSnapshotWithoutWaiting(ctx, dc, vs.GetNamespace(), vs.GetName()) framework.ExpectNoError(err) framework.Logf("deleting restored pod %q/%q", restoredPod.Namespace, restoredPod.Name) err = cs.CoreV1().Pods(restoredPod.Namespace).Delete(context.TODO(), restoredPod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - deleteVolumeSnapshot(f, dc, sr, pattern, vscontent) - + deleteVolumeSnapshot(ctx, f, dc, sr, pattern, vscontent) }) ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)", func(ctx context.Context) { @@ -258,23 +257,23 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // The pod should be in the Success state. ginkgo.By("[init] check pod success") - pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to fetch pod: %v", err) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow)) // Sync the pod to know additional fields. - pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to fetch pod: %v", err) ginkgo.By("[init] checking the claim") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision) framework.ExpectNoError(err) // Get new copy of the claim. - pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Get the bound PV. ginkgo.By("[init] checking the PV") - pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + pv, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) // Delete the pod to force NodeUnpublishVolume (unlike the ephemeral case where the pod is deleted at the end of the test). @@ -311,7 +310,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, ginkgo.By(fmt.Sprintf("[init] waiting until the node=%s is not using the volume=%s", nodeName, volumeName)) success := storageutils.WaitUntil(framework.Poll, f.Timeouts.PVDelete, func() bool { - node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) volumesInUse := node.Status.VolumesInUse framework.Logf("current volumes in use: %+v", volumesInUse) @@ -328,11 +327,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // Take the snapshot. 
parameters := map[string]string{} - sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters) + sr := storageframework.CreateSnapshotResource(ctx, sDriver, config, pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters) ginkgo.DeferCleanup(sr.CleanupResource, f.Timeouts) vs := sr.Vs // get the snapshot and check SnapshotContent properties - vscontent := checkSnapshot(dc, sr, pattern) + vscontent := checkSnapshot(ctx, dc, sr, pattern) ginkgo.By("Modifying source data test") var restoredPVC *v1.PersistentVolumeClaim @@ -361,7 +360,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, Name: vs.GetName(), } - restoredPVC, err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Create(context.TODO(), restoredPVC, metav1.CreateOptions{}) + restoredPVC, err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Create(ctx, restoredPVC, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(func(ctx context.Context) { framework.Logf("deleting claim %q/%q", restoredPVC.Namespace, restoredPVC.Name) @@ -375,7 +374,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, ginkgo.By("starting a pod to use the snapshot") restoredPod = StartInPodWithVolume(ctx, cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) ginkgo.DeferCleanup(StopPod, cs, restoredPod) - framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) commands := e2evolume.GenerateReadFileCmd(datapath) _, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute) framework.ExpectNoError(err) @@ -385,53 +384,53 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // Delete both Snapshot and restored Pod/PVC at the same time because different storage systems // have different ordering of deletion. Some may require delete the restored PVC first before // Snapshot deletion and some are opposite. 
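The ginkgo.DeferCleanup(sr.CleanupResource, f.Timeouts) registration above works because DeferCleanup accepts trailing arguments and, when the registered function's first parameter is a context.Context, supplies that context itself at cleanup time (Ginkgo v2 behaviour; signature paraphrased from elsewhere in this change):

// New signature of the cleanup helper:
//   func (sr *SnapshotResource) CleanupResource(ctx context.Context, timeouts *framework.TimeoutContext) error

ginkgo.DeferCleanup(sr.CleanupResource, f.Timeouts)

// Roughly equivalent to registering:
ginkgo.DeferCleanup(func(ctx context.Context) {
	framework.ExpectNoError(sr.CleanupResource(ctx, f.Timeouts))
})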
- err = storageutils.DeleteSnapshotWithoutWaiting(dc, vs.GetNamespace(), vs.GetName()) + err = storageutils.DeleteSnapshotWithoutWaiting(ctx, dc, vs.GetNamespace(), vs.GetName()) framework.ExpectNoError(err) framework.Logf("deleting restored pod %q/%q", restoredPod.Namespace, restoredPod.Name) - err = cs.CoreV1().Pods(restoredPod.Namespace).Delete(context.TODO(), restoredPod.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(restoredPod.Namespace).Delete(ctx, restoredPod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) framework.Logf("deleting restored PVC %q/%q", restoredPVC.Namespace, restoredPVC.Name) - err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(context.TODO(), restoredPVC.Name, metav1.DeleteOptions{}) + err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(ctx, restoredPVC.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - deleteVolumeSnapshot(f, dc, sr, pattern, vscontent) + deleteVolumeSnapshot(ctx, f, dc, sr, pattern, vscontent) }) }) }) } -func deleteVolumeSnapshot(f *framework.Framework, dc dynamic.Interface, sr *storageframework.SnapshotResource, pattern storageframework.TestPattern, vscontent *unstructured.Unstructured) { +func deleteVolumeSnapshot(ctx context.Context, f *framework.Framework, dc dynamic.Interface, sr *storageframework.SnapshotResource, pattern storageframework.TestPattern, vscontent *unstructured.Unstructured) { vs := sr.Vs // Wait for the Snapshot to be actually deleted from API server - err := storageutils.WaitForNamespacedGVRDeletion(dc, storageutils.SnapshotGVR, vs.GetNamespace(), vs.GetNamespace(), framework.Poll, f.Timeouts.SnapshotDelete) + err := storageutils.WaitForNamespacedGVRDeletion(ctx, dc, storageutils.SnapshotGVR, vs.GetNamespace(), vs.GetNamespace(), framework.Poll, f.Timeouts.SnapshotDelete) framework.ExpectNoError(err) switch pattern.SnapshotDeletionPolicy { case storageframework.DeleteSnapshot: ginkgo.By("checking the SnapshotContent has been deleted") - err = storageutils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete) + err = storageutils.WaitForGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete) framework.ExpectNoError(err) case storageframework.RetainSnapshot: ginkgo.By("checking the SnapshotContent has not been deleted") - err = storageutils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */) + err = storageutils.WaitForGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */) framework.ExpectError(err) } } -func checkSnapshot(dc dynamic.Interface, sr *storageframework.SnapshotResource, pattern storageframework.TestPattern) *unstructured.Unstructured { +func checkSnapshot(ctx context.Context, dc dynamic.Interface, sr *storageframework.SnapshotResource, pattern storageframework.TestPattern) *unstructured.Unstructured { vs := sr.Vs vsc := sr.Vsclass // Get new copy of the snapshot ginkgo.By("checking the snapshot") - vs, err := dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{}) + vs, err := dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(ctx, vs.GetName(), metav1.GetOptions{}) framework.ExpectNoError(err) // Get the bound snapshotContent snapshotStatus := 
vs.Object["status"].(map[string]interface{}) snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string) - vscontent, err := dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{}) + vscontent, err := dc.Resource(storageutils.SnapshotContentGVR).Get(ctx, snapshotContentName, metav1.GetOptions{}) framework.ExpectNoError(err) snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{}) diff --git a/test/e2e/storage/testsuites/snapshottable_stress.go b/test/e2e/storage/testsuites/snapshottable_stress.go index 531d415d5ad..ba2986dd37b 100644 --- a/test/e2e/storage/testsuites/snapshottable_stress.go +++ b/test/e2e/storage/testsuites/snapshottable_stress.go @@ -122,11 +122,11 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD f := framework.NewDefaultFramework("snapshottable-stress") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { driverInfo = driver.GetDriverInfo() snapshottableDriver, _ = driver.(storageframework.SnapshottableTestDriver) cs = f.ClientSet - config := driver.PrepareTest(f) + config := driver.PrepareTest(ctx, f) stressTest = &snapshottableStressTest{ config: config, @@ -137,11 +137,11 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD } } - createPodsAndVolumes := func() { + createPodsAndVolumes := func(ctx context.Context) { for i := 0; i < stressTest.testOptions.NumPods; i++ { framework.Logf("Creating resources for pod %d/%d", i, stressTest.testOptions.NumPods-1) - volume := storageframework.CreateVolumeResource(driver, stressTest.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange) + volume := storageframework.CreateVolumeResource(ctx, driver, stressTest.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange) stressTest.volumes = append(stressTest.volumes, volume) podConfig := e2epod.Config{ @@ -163,7 +163,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD defer ginkgo.GinkgoRecover() defer wg.Done() - if _, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { + if _, err := cs.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}); err != nil { framework.Failf("Failed to create pod-%d [%+v]. Error: %v", i, pod, err) } }(i, pod) @@ -171,13 +171,13 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD wg.Wait() for i, pod := range stressTest.pods { - if err := e2epod.WaitForPodRunningInNamespace(cs, pod); err != nil { + if err := e2epod.WaitForPodRunningInNamespace(ctx, cs, pod); err != nil { framework.Failf("Failed to wait for pod-%d [%+v] turn into running status. 
Error: %v", i, pod, err) } } } - cleanup := func() { + cleanup := func(ctx context.Context) { framework.Logf("Stopping and waiting for all test routines to finish") stressTest.wg.Wait() @@ -194,7 +194,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD defer wg.Done() framework.Logf("Deleting snapshot %s/%s", snapshot.Vs.GetNamespace(), snapshot.Vs.GetName()) - err := snapshot.CleanupResource(f.Timeouts) + err := snapshot.CleanupResource(ctx, f.Timeouts) mu.Lock() defer mu.Unlock() errs = append(errs, err) @@ -209,7 +209,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD defer wg.Done() framework.Logf("Deleting pod %s", pod.Name) - err := e2epod.DeletePodWithWait(cs, pod) + err := e2epod.DeletePodWithWait(ctx, cs, pod) mu.Lock() defer mu.Unlock() errs = append(errs, err) @@ -224,7 +224,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD defer wg.Done() framework.Logf("Deleting volume %s", volume.Pvc.GetName()) - err := volume.CleanupResource() + err := volume.CleanupResource(ctx) mu.Lock() defer mu.Unlock() errs = append(errs, err) @@ -237,13 +237,10 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resources") } - ginkgo.BeforeEach(func() { - init() - ginkgo.DeferCleanup(cleanup) - createPodsAndVolumes() - }) - ginkgo.It("should support snapshotting of many volumes repeatedly [Slow] [Serial]", func(ctx context.Context) { + init(ctx) + ginkgo.DeferCleanup(cleanup) + createPodsAndVolumes(ctx) // Repeatedly create and delete snapshots of each volume. for i := 0; i < stressTest.testOptions.NumPods; i++ { for j := 0; j < stressTest.testOptions.NumSnapshots; j++ { @@ -270,7 +267,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD default: framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1) parameters := map[string]string{} - snapshot := storageframework.CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts, parameters) + snapshot := storageframework.CreateSnapshotResource(ctx, snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts, parameters) stressTest.snapshotsMutex.Lock() defer stressTest.snapshotsMutex.Unlock() stressTest.snapshots = append(stressTest.snapshots, snapshot) diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 5a0fca0d6c5..c8c2b787d81 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -119,14 +119,14 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { l = local{} // Now do the more expensive test initialization. 
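Pod creation in the stress suite above happens from goroutines, which need two things: defer ginkgo.GinkgoRecover() so that a failure raised inside the goroutine is reported to Ginkgo, and the spec's ctx for the API call so that an abort cancels the in-flight request. Condensed from that loop, with cs, pods and ctx standing in for the suite's fields:

var wg sync.WaitGroup
for i, pod := range pods {
	i, pod := i, pod
	wg.Add(1)
	go func() {
		defer ginkgo.GinkgoRecover()
		defer wg.Done()
		if _, err := cs.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
			framework.Failf("Failed to create pod-%d [%+v]. Error: %v", i, pod, err)
		}
	}()
}
wg.Wait()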
- l.config = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName) + l.config = driver.PrepareTest(ctx, f) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange - l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) l.hostExec = storageutils.NewHostExec(f) // Setup subPath test dependent resource @@ -166,44 +166,44 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte l.filePathInVolume = filepath.Join(l.subPathDir, fileName) } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error if l.pod != nil { ginkgo.By("Deleting pod") - err := e2epod.DeletePodWithWait(f.ClientSet, l.pod) + err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod) errs = append(errs, err) l.pod = nil } if l.resource != nil { - errs = append(errs, l.resource.CleanupResource()) + errs = append(errs, l.resource.CleanupResource(ctx)) l.resource = nil } framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") if l.hostExec != nil { - l.hostExec.Cleanup() + l.hostExec.Cleanup(ctx) } - l.migrationCheck.validateMigrationVolumeOpCounts() + l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } driverName := driver.GetDriverInfo().Name ginkgo.It("should support non-existent path", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Write the file in the subPath from init container 1 setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1]) // Read it from outside the subPath from container 1 - testReadFile(f, l.filePathInVolume, l.pod, 1) + testReadFile(ctx, f, l.filePathInVolume, l.pod, 1) }) ginkgo.It("should support existing directory", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the directory @@ -213,32 +213,32 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1]) // Read it from outside the subPath from container 1 - testReadFile(f, l.filePathInVolume, l.pod, 1) + testReadFile(ctx, f, l.filePathInVolume, l.pod, 1) }) ginkgo.It("should support existing single file [LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the file in the init container setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume)) // Read it from inside the subPath from container 0 - testReadFile(f, l.filePathInSubpath, l.pod, 0) + testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0) }) ginkgo.It("should support file as subpath [LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the file in the init container setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir)) - TestBasicSubpath(f, f.Namespace.Name, l.pod) + TestBasicSubpath(ctx, f, f.Namespace.Name, l.pod) }) ginkgo.It("should fail if subpath directory is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the subpath outside the volume @@ -250,33 +250,33 @@ func (s *subPathTestSuite) DefineTests(driver 
storageframework.TestDriver, patte } setInitCommand(l.pod, command) // Pod should fail - testPodFailSubpath(f, l.pod, false) + testPodFailSubpath(ctx, f, l.pod, false) }) ginkgo.It("should fail if subpath file is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the subpath outside the volume setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir)) // Pod should fail - testPodFailSubpath(f, l.pod, false) + testPodFailSubpath(ctx, f, l.pod, false) }) ginkgo.It("should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the subpath outside the volume setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir)) // Pod should fail - testPodFailSubpath(f, l.pod, false) + testPodFailSubpath(ctx, f, l.pod, false) }) ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the subpath outside the volume @@ -288,11 +288,11 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte } setInitCommand(l.pod, command) // Pod should fail - testPodFailSubpath(f, l.pod, false) + testPodFailSubpath(ctx, f, l.pod, false) }) ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) subpathDir1 := filepath.Join(volumePath, "subpath1") @@ -314,33 +314,33 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte // Write the files from container 0 and instantly read them back addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2) - testMultipleReads(f, l.pod, 0, filepath1, filepath2) + testMultipleReads(ctx, f, l.pod, 0, filepath1, filepath2) }) ginkgo.It("should support restarting containers using directory as subpath [Slow]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the directory var command string command = fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath) setInitCommand(l.pod, command) - testPodContainerRestart(f, l.pod) + testPodContainerRestart(ctx, f, l.pod) }) ginkgo.It("should support restarting containers using file as subpath [Slow][LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the file setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath)) - testPodContainerRestart(f, l.pod) + testPodContainerRestart(ctx, f, l.pod) }) ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() - init() + init(ctx) ginkgo.DeferCleanup(cleanup) if strings.HasPrefix(driverName, "hostPath") { @@ -348,12 +348,12 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte e2eskipper.Skipf("Driver %s does not support reconstruction, skipping", driverName) } - testSubpathReconstruction(f, l.hostExec, l.pod, false) + testSubpathReconstruction(ctx, f, l.hostExec, l.pod, false) }) ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() - init() + init(ctx) ginkgo.DeferCleanup(cleanup) if strings.HasPrefix(driverName, "hostPath") { @@ 
-361,11 +361,11 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte e2eskipper.Skipf("Driver %s does not support reconstruction, skipping", driverName) } - testSubpathReconstruction(f, l.hostExec, l.pod, true) + testSubpathReconstruction(ctx, f, l.hostExec, l.pod, true) }) ginkgo.It("should support readOnly directory specified in the volumeMount", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the directory @@ -376,11 +376,11 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte // Read it from inside the subPath from container 0 l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true - testReadFile(f, l.filePathInSubpath, l.pod, 0) + testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0) }) ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Create the file @@ -391,11 +391,11 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte // Read it from inside the subPath from container 0 l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true - testReadFile(f, volumePath, l.pod, 0) + testReadFile(ctx, f, volumePath, l.pod, 0) }) ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) if l.roVolSource == nil { e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType) @@ -410,7 +410,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1]) // Read it from inside the subPath from container 0 - testReadFile(f, l.filePathInSubpath, l.pod, 0) + testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0) // Reset the pod l.pod = origpod @@ -419,18 +419,18 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource // Read it from inside the subPath from container 0 - testReadFile(f, l.filePathInSubpath, l.pod, 0) + testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0) }) ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) if l.roVolSource == nil { e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType) } // Format the volume while it's writable - formatVolume(f, l.formatPod) + formatVolume(ctx, f, l.formatPod) // Set volume source to read only l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource @@ -439,13 +439,13 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0]) // Pod should fail - testPodFailSubpath(f, l.pod, true) + testPodFailSubpath(ctx, f, l.pod, true) }) // Set this test linux-only because the test will fail in Windows when // deleting a dir from one container while another container still use it. 
ginkgo.It("should be able to unmount after the subpath directory is deleted [LinuxOnly]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) // Change volume container to busybox so we can exec later @@ -455,15 +455,15 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name)) removeUnusedContainers(l.pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), l.pod, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, l.pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating pod") ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) - return e2epod.DeletePodWithWait(f.ClientSet, pod) + return e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) }) // Wait for pod to be running - err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "while waiting for pod to be running") // Exec into container that mounted the volume, delete subpath directory @@ -478,20 +478,20 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte } // TestBasicSubpath runs basic subpath test -func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) { - TestBasicSubpathFile(f, contents, pod, volumePath) +func TestBasicSubpath(ctx context.Context, f *framework.Framework, contents string, pod *v1.Pod) { + TestBasicSubpathFile(ctx, f, contents, pod, volumePath) } // TestBasicSubpathFile runs basic subpath file test -func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) { +func TestBasicSubpathFile(ctx context.Context, f *framework.Framework, contents string, pod *v1.Pod, filepath string) { setReadCommand(filepath, &pod.Spec.Containers[0]) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - e2eoutput.TestContainerOutput(f, "atomic-volume-subpath", pod, 0, []string{contents}) + e2eoutput.TestContainerOutput(ctx, f, "atomic-volume-subpath", pod, 0, []string{contents}) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) - err := e2epod.DeletePodWithWait(f.ClientSet, pod) + err := e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting pod") } @@ -668,10 +668,10 @@ func addMultipleWrites(container *v1.Container, file1 string, file2 string) { } } -func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) { +func testMultipleReads(ctx context.Context, f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) { ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - e2eoutput.TestContainerOutput(f, "multi_subpath", pod, containerIndex, []string{ + e2eoutput.TestContainerOutput(ctx, f, "multi_subpath", pod, containerIndex, []string{ "content of file \"" + file1 + "\": mount-tester new file", "content of file \"" + file2 + "\": mount-tester new file", }) @@ -685,32 +685,32 @@ func setReadCommand(file string, container *v1.Container) { } } -func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) { +func testReadFile(ctx context.Context, f *framework.Framework, file string, pod *v1.Pod, containerIndex int) { 
setReadCommand(file, &pod.Spec.Containers[containerIndex]) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - e2eoutput.TestContainerOutput(f, "subpath", pod, containerIndex, []string{ + e2eoutput.TestContainerOutput(ctx, f, "subpath", pod, containerIndex, []string{ "content of file \"" + file + "\": mount-tester new file", }) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) - err := e2epod.DeletePodWithWait(f.ClientSet, pod) + err := e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting pod") } -func testPodFailSubpath(f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) { - testPodFailSubpathError(f, pod, "subPath", allowContainerTerminationError) +func testPodFailSubpath(ctx context.Context, f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) { + testPodFailSubpathError(ctx, f, pod, "subPath", allowContainerTerminationError) } -func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) { +func testPodFailSubpathError(ctx context.Context, f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) { ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating pod") ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod) ginkgo.By("Checking for subpath error in container status") - err = waitForPodSubpathError(f, pod, allowContainerTerminationError) + err = waitForPodSubpathError(ctx, f, pod, allowContainerTerminationError) framework.ExpectNoError(err, "while waiting for subpath failure") } @@ -725,14 +725,14 @@ func findSubpathContainerName(pod *v1.Pod) string { return "" } -func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) error { +func waitForPodSubpathError(ctx context.Context, f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) error { subpathContainerName := findSubpathContainerName(pod) if subpathContainerName == "" { return fmt.Errorf("failed to find container that uses subpath") } waitErr := wait.PollImmediate(framework.Poll, f.Timeouts.PodStart, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -790,7 +790,7 @@ func (h *podContainerRestartHooks) FixLivenessProbe(pod *v1.Pod, probeFilePath s // testPodContainerRestartWithHooks tests that container restarts to stabilize. // hooks wrap functions between container restarts. 
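waitForPodSubpathError above keeps wait.PollImmediate, but the Get inside the poll closure now uses ctx, so once the spec is aborted the request fails and the poll returns that error promptly. A sketch of the pattern (cs, ns, name and timeout are placeholders):

err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
	p, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return false, err // a cancelled ctx surfaces here and ends the poll
	}
	return p.Status.Phase == v1.PodRunning, nil
})

A fully context-aware variant, not what this hunk uses, would be wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { ... }).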
-func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks *podContainerRestartHooks) { +func testPodContainerRestartWithHooks(ctx context.Context, f *framework.Framework, pod *v1.Pod, hooks *podContainerRestartHooks) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage() @@ -804,10 +804,10 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks // Start pod ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating pod") ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod) - err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "while waiting for pod to be running") ginkgo.By("Failing liveness probe") @@ -819,7 +819,7 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks ginkgo.By("Waiting for container to restart") restarts := int32(0) err = wait.PollImmediate(10*time.Second, f.Timeouts.PodDelete+f.Timeouts.PodStart, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -850,7 +850,7 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks stableCount := int(0) stableThreshold := int(time.Minute / framework.Poll) err = wait.PollImmediate(framework.Poll, f.Timeouts.PodStartSlow, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -876,8 +876,8 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks } // testPodContainerRestart tests that the existing subpath mount is detected when a container restarts -func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { - testPodContainerRestartWithHooks(f, pod, &podContainerRestartHooks{ +func testPodContainerRestart(ctx context.Context, f *framework.Framework, pod *v1.Pod) { + testPodContainerRestartWithHooks(ctx, f, pod, &podContainerRestartHooks{ AddLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) { p.Spec.Containers[0].LivenessProbe = &v1.Probe{ ProbeHandler: v1.ProbeHandler{ @@ -915,9 +915,9 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { // 2. update configmap // 3. container restarts // 4. 
container becomes stable after configmap mounted file has been modified -func TestPodContainerRestartWithConfigmapModified(f *framework.Framework, original, modified *v1.ConfigMap) { +func TestPodContainerRestartWithConfigmapModified(ctx context.Context, f *framework.Framework, original, modified *v1.ConfigMap) { ginkgo.By("Create configmap") - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), original, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, original, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "while creating configmap to modify") } @@ -931,7 +931,7 @@ func TestPodContainerRestartWithConfigmapModified(f *framework.Framework, origin pod.Spec.InitContainers[0].Command = e2epod.GenerateScriptCmd(fmt.Sprintf("touch %v", probeFilePath)) modifiedValue := modified.Data[subpath] - testPodContainerRestartWithHooks(f, pod, &podContainerRestartHooks{ + testPodContainerRestartWithHooks(ctx, f, pod, &podContainerRestartHooks{ AddLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) { p.Spec.Containers[0].LivenessProbe = &v1.Probe{ ProbeHandler: v1.ProbeHandler{ @@ -951,23 +951,23 @@ func TestPodContainerRestartWithConfigmapModified(f *framework.Framework, origin framework.ExpectNoError(err, "while failing liveness probe") }, FixLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) { - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), modified, metav1.UpdateOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, modified, metav1.UpdateOptions{}) framework.ExpectNoError(err, "while fixing liveness probe") }, }) } -func testSubpathReconstruction(f *framework.Framework, hostExec storageutils.HostExec, pod *v1.Pod, forceDelete bool) { +func testSubpathReconstruction(ctx context.Context, f *framework.Framework, hostExec storageutils.HostExec, pod *v1.Pod, forceDelete bool) { // This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption() // Disruptive test run serially, we can cache all voluem global mount // points and verify after the test that we do not leak any global mount point. 
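The cached mount points mentioned in the comment above feed a before/after leak check further down in this function. Condensed, with node, hostExec and ctx standing in for the function's locals:

// Record the node's global mount points up front ...
before := storageutils.FindVolumeGlobalMountPoints(ctx, hostExec, node)

// ... run the disruptive part of the test, then compare.
after := storageutils.FindVolumeGlobalMountPoints(ctx, hostExec, node)
leaked := after.Difference(before)
gomega.Expect(leaked).To(gomega.BeEmpty(), "global mount points leaked: %v", leaked)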
- nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "while listing schedulable nodes") globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items)) for _, node := range nodeList.Items { - globalMountPointsByNode[node.Name] = storageutils.FindVolumeGlobalMountPoints(hostExec, &node) + globalMountPointsByNode[node.Name] = storageutils.FindVolumeGlobalMountPoints(ctx, hostExec, &node) } // Change to busybox @@ -984,12 +984,12 @@ func testSubpathReconstruction(f *framework.Framework, hostExec storageutils.Hos ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating pod") - err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "while waiting for pod to be running") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "while getting pod") var podNode *v1.Node @@ -1000,11 +1000,11 @@ func testSubpathReconstruction(f *framework.Framework, hostExec storageutils.Hos } framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes") - storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true, nil, volumePath) + storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, f.ClientSet, f, pod, forceDelete, true, nil, volumePath) if podNode != nil { mountPoints := globalMountPointsByNode[podNode.Name] - mountPointsAfter := storageutils.FindVolumeGlobalMountPoints(hostExec, podNode) + mountPointsAfter := storageutils.FindVolumeGlobalMountPoints(ctx, hostExec, podNode) s1 := mountPointsAfter.Difference(mountPoints) s2 := mountPoints.Difference(mountPointsAfter) gomega.Expect(s1).To(gomega.BeEmpty(), "global mount points leaked: %v", s1) @@ -1012,15 +1012,15 @@ func testSubpathReconstruction(f *framework.Framework, hostExec storageutils.Hos } } -func formatVolume(f *framework.Framework, pod *v1.Pod) { +func formatVolume(ctx context.Context, f *framework.Framework, pod *v1.Pod) { ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name)) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "while creating volume init pod") - err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "while waiting for volume init pod to succeed") - err = e2epod.DeletePodWithWait(f.ClientSet, pod) + err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting volume init pod") } diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go 
index 9ffd0f6f7b5..877bba5f405 100644 --- a/test/e2e/storage/testsuites/topology.go +++ b/test/e2e/storage/testsuites/topology.go @@ -104,12 +104,12 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt f := framework.NewFrameworkWithCustomTimeouts("topology", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() *topologyTest { + init := func(ctx context.Context) *topologyTest { dDriver, _ = driver.(storageframework.DynamicPVTestDriver) l := &topologyTest{} // Now do the more expensive test initialization. - l.config = driver.PrepareTest(f) + l.config = driver.PrepareTest(ctx, f) l.resource = storageframework.VolumeResource{ Config: l.config, @@ -131,13 +131,13 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt } // We collect 1 additional topology, if possible, for the conflicting topology test // case, but it's not needed for the positive test - l.allTopologies, err = t.getCurrentTopologies(cs, keys, dInfo.NumAllowedTopologies+1) + l.allTopologies, err = t.getCurrentTopologies(ctx, cs, keys, dInfo.NumAllowedTopologies+1) framework.ExpectNoError(err, "failed to get current driver topologies") if len(l.allTopologies) < dInfo.NumAllowedTopologies { e2eskipper.Skipf("Not enough topologies in cluster -- skipping") } - l.resource.Sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType) + l.resource.Sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, pattern.FsType) framework.ExpectNotEqual(l.resource.Sc, nil, "driver failed to provide a StorageClass") l.resource.Sc.VolumeBindingMode = &pattern.BindingMode @@ -150,14 +150,14 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt StorageClassName: &(l.resource.Sc.Name), }, l.config.Framework.Namespace.Name) - migrationCheck := newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) + migrationCheck := newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) ginkgo.DeferCleanup(migrationCheck.validateMigrationVolumeOpCounts) return l } ginkgo.It("should provision a volume and schedule a pod with AllowedTopologies", func(ctx context.Context) { - l := init() + l := init(ctx) // If possible, exclude one topology, otherwise allow them all excludedIndex := -1 @@ -166,23 +166,23 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt } allowedTopologies := t.setAllowedTopologies(l.resource.Sc, l.allTopologies, excludedIndex) - t.createResources(cs, l, nil) + t.createResources(ctx, cs, l, nil) - err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err) ginkgo.By("Verifying pod scheduled to correct node") - pod, err := cs.CoreV1().Pods(l.pod.Namespace).Get(context.TODO(), l.pod.Name, metav1.GetOptions{}) + pod, err := cs.CoreV1().Pods(l.pod.Namespace).Get(ctx, l.pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - node, err := cs.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) t.verifyNodeTopology(node, allowedTopologies) }) ginkgo.It("should fail to schedule a pod which has topologies that conflict with AllowedTopologies", func(ctx context.Context) { - l := init() + l := 
init(ctx) if len(l.allTopologies) < dInfo.NumAllowedTopologies+1 { e2eskipper.Skipf("Not enough topologies in cluster -- skipping") @@ -213,19 +213,19 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt }, }, } - t.createResources(cs, l, affinity) + t.createResources(ctx, cs, l, affinity) // Wait for pod to fail scheduling // With delayed binding, the scheduler errors before provisioning // With immediate binding, the volume gets provisioned but cannot be scheduled - err = e2epod.WaitForPodNameUnschedulableInNamespace(cs, l.pod.Name, l.pod.Namespace) + err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, cs, l.pod.Name, l.pod.Namespace) framework.ExpectNoError(err) }) } // getCurrentTopologies() goes through all Nodes and returns up to maxCount unique driver topologies -func (t *topologyTestSuite) getCurrentTopologies(cs clientset.Interface, keys []string, maxCount int) ([]topology, error) { - nodes, err := e2enode.GetReadySchedulableNodes(cs) +func (t *topologyTestSuite) getCurrentTopologies(ctx context.Context, cs clientset.Interface, keys []string, maxCount int) ([]topology, error) { + nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) if err != nil { return nil, err } @@ -309,16 +309,16 @@ func (t *topologyTestSuite) verifyNodeTopology(node *v1.Node, allowedTopos []top framework.Failf("node %v topology labels %+v doesn't match allowed topologies +%v", node.Name, node.Labels, allowedTopos) } -func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyTest, affinity *v1.Affinity) { +func (t *topologyTestSuite) createResources(ctx context.Context, cs clientset.Interface, l *topologyTest, affinity *v1.Affinity) { var err error framework.Logf("Creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.resource.Sc, l.resource.Pvc) ginkgo.By("Creating sc") - l.resource.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), l.resource.Sc, metav1.CreateOptions{}) + l.resource.Sc, err = cs.StorageV1().StorageClasses().Create(ctx, l.resource.Sc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating pvc") - l.resource.Pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.Pvc.Namespace).Create(context.TODO(), l.resource.Pvc, metav1.CreateOptions{}) + l.resource.Pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.Pvc.Namespace).Create(ctx, l.resource.Pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating pod") @@ -331,17 +331,17 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT } l.pod, err = e2epod.MakeSecPod(&podConfig) framework.ExpectNoError(err) - l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(context.TODO(), l.pod, metav1.CreateOptions{}) + l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(ctx, l.pod, metav1.CreateOptions{}) framework.ExpectNoError(err) } -func (t *topologyTestSuite) CleanupResources(cs clientset.Interface, l *topologyTest) { +func (t *topologyTestSuite) CleanupResources(ctx context.Context, cs clientset.Interface, l *topologyTest) { if l.pod != nil { ginkgo.By("Deleting pod") - err := e2epod.DeletePodWithWait(cs, l.pod) + err := e2epod.DeletePodWithWait(ctx, cs, l.pod) framework.ExpectNoError(err, "while deleting pod") } - err := l.resource.CleanupResource() + err := l.resource.CleanupResource(ctx) framework.ExpectNoError(err, "while clean up resource") } diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go index 676a99b9c66..4cd502490c5 
100644 --- a/test/e2e/storage/testsuites/volume_expand.go +++ b/test/e2e/storage/testsuites/volume_expand.go @@ -116,44 +116,44 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, f := framework.NewFrameworkWithCustomTimeouts("volume-expand", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { l = local{} // Now do the more expensive test initialization. - l.config = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName) + l.config = driver.PrepareTest(ctx, f) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName) testVolumeSizeRange := v.GetTestSuiteInfo().SupportedSizeRange - l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error if l.pod != nil { ginkgo.By("Deleting pod") - err := e2epod.DeletePodWithWait(f.ClientSet, l.pod) + err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod) errs = append(errs, err) l.pod = nil } if l.pod2 != nil { ginkgo.By("Deleting pod2") - err := e2epod.DeletePodWithWait(f.ClientSet, l.pod2) + err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod2) errs = append(errs, err) l.pod2 = nil } if l.resource != nil { - errs = append(errs, l.resource.CleanupResource()) + errs = append(errs, l.resource.CleanupResource(ctx)) l.resource = nil } framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") - l.migrationCheck.validateMigrationVolumeOpCounts() + l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } if !pattern.AllowExpansion { ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) var err error @@ -165,12 +165,12 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("1Gi")) framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) - _, err = ExpandPVCSize(l.resource.Pvc, newSize, f.ClientSet) + _, err = ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet) framework.ExpectError(err, "While updating non-expandable PVC") }) } else { ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) if !driver.GetDriverInfo().Capabilities[storageframework.CapOfflineExpansion] { @@ -186,12 +186,12 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, NodeSelection: l.config.ClientNodeSelection, ImageID: e2epod.GetDefaultTestImageID(), } - l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart) + l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, f.Timeouts.PodStart) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod) framework.ExpectNoError(err, "While creating pods for resizing") ginkgo.By("Deleting the previously created pod") - err = e2epod.DeletePodWithWait(f.ClientSet, l.pod) + err = e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod) framework.ExpectNoError(err, "while deleting pod for resizing") // We expand the PVC while no pod 
is using it to ensure offline expansion @@ -200,7 +200,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("1Gi")) framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) - newPVC, err := ExpandPVCSize(l.resource.Pvc, newSize, f.ClientSet) + newPVC, err := ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet) framework.ExpectNoError(err, "While updating pvc for more size") l.resource.Pvc = newPVC gomega.Expect(l.resource.Pvc).NotTo(gomega.BeNil()) @@ -211,11 +211,11 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, } ginkgo.By("Waiting for cloudprovider resize to finish") - err = WaitForControllerVolumeResize(l.resource.Pvc, f.ClientSet, totalResizeWaitPeriod) + err = WaitForControllerVolumeResize(ctx, l.resource.Pvc, f.ClientSet, totalResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for pvc resize to finish") ginkgo.By("Checking for conditions on pvc") - npvc, err := WaitForPendingFSResizeCondition(l.resource.Pvc, f.ClientSet) + npvc, err := WaitForPendingFSResizeCondition(ctx, l.resource.Pvc, f.ClientSet) framework.ExpectNoError(err, "While waiting for pvc to have fs resizing condition") l.resource.Pvc = npvc @@ -227,12 +227,12 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, NodeSelection: l.config.ClientNodeSelection, ImageID: e2epod.GetDefaultTestImageID(), } - l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, resizedPodStartupTimeout) + l.pod2, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, resizedPodStartupTimeout) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod2) framework.ExpectNoError(err, "while recreating pod for resizing") ginkgo.By("Waiting for file system resize to finish") - l.resource.Pvc, err = WaitForFSResize(l.resource.Pvc, f.ClientSet) + l.resource.Pvc, err = WaitForFSResize(ctx, l.resource.Pvc, f.ClientSet) framework.ExpectNoError(err, "while waiting for fs resize to finish") pvcConditions := l.resource.Pvc.Status.Conditions @@ -240,7 +240,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, }) ginkgo.It("should resize volume when PVC is edited while pod is using it", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] { @@ -256,7 +256,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, NodeSelection: l.config.ClientNodeSelection, ImageID: e2epod.GetDefaultTestImageID(), } - l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart) + l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, f.Timeouts.PodStart) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod) framework.ExpectNoError(err, "While creating pods for resizing") @@ -266,7 +266,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("1Gi")) framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) - newPVC, err := ExpandPVCSize(l.resource.Pvc, newSize, f.ClientSet) + newPVC, err := ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet) framework.ExpectNoError(err, "While updating pvc for more size") l.resource.Pvc = newPVC gomega.Expect(l.resource.Pvc).NotTo(gomega.BeNil()) @@ -277,11 
+277,11 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, } ginkgo.By("Waiting for cloudprovider resize to finish") - err = WaitForControllerVolumeResize(l.resource.Pvc, f.ClientSet, totalResizeWaitPeriod) + err = WaitForControllerVolumeResize(ctx, l.resource.Pvc, f.ClientSet, totalResizeWaitPeriod) framework.ExpectNoError(err, "While waiting for pvc resize to finish") ginkgo.By("Waiting for file system resize to finish") - l.resource.Pvc, err = WaitForFSResize(l.resource.Pvc, f.ClientSet) + l.resource.Pvc, err = WaitForFSResize(ctx, l.resource.Pvc, f.ClientSet) framework.ExpectNoError(err, "while waiting for fs resize to finish") pvcConditions := l.resource.Pvc.Status.Conditions @@ -292,7 +292,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, } // ExpandPVCSize expands PVC size -func ExpandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolumeClaim, error) { +func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolumeClaim, error) { pvcName := origPVC.Name updatedPVC := origPVC.DeepCopy() @@ -301,13 +301,13 @@ func ExpandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c var lastUpdateError error waitErr := wait.PollImmediate(resizePollInterval, 30*time.Second, func() (bool, error) { var err error - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pvc %q for resizing: %v", pvcName, err) } updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(context.TODO(), updatedPVC, metav1.UpdateOptions{}) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(ctx, updatedPVC, metav1.UpdateOptions{}) if err != nil { framework.Logf("Error updating pvc %s: %v", pvcName, err) lastUpdateError = err @@ -325,10 +325,10 @@ func ExpandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c } // WaitForResizingCondition waits for the pvc condition to be PersistentVolumeClaimResizing -func WaitForResizingCondition(pvc *v1.PersistentVolumeClaim, c clientset.Interface, duration time.Duration) error { - waitErr := wait.PollImmediate(resizePollInterval, duration, func() (bool, error) { +func WaitForResizingCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface, duration time.Duration) error { + waitErr := wait.PollImmediateWithContext(ctx, resizePollInterval, duration, func(ctx context.Context) (bool, error) { var err error - updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err) @@ -349,12 +349,12 @@ func WaitForResizingCondition(pvc *v1.PersistentVolumeClaim, c clientset.Interfa } // WaitForControllerVolumeResize waits for the controller resize to be finished -func WaitForControllerVolumeResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface, timeout time.Duration) error { +func WaitForControllerVolumeResize(ctx 
context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface, timeout time.Duration) error { pvName := pvc.Spec.VolumeName - waitErr := wait.PollImmediate(resizePollInterval, timeout, func() (bool, error) { + waitErr := wait.PollImmediateWithContext(ctx, resizePollInterval, timeout, func(ctx context.Context) (bool, error) { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] - pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pv %q for resizing %v", pvName, err) } @@ -374,11 +374,11 @@ func WaitForControllerVolumeResize(pvc *v1.PersistentVolumeClaim, c clientset.In } // WaitForPendingFSResizeCondition waits for pvc to have resize condition -func WaitForPendingFSResizeCondition(pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) { +func WaitForPendingFSResizeCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) { var updatedPVC *v1.PersistentVolumeClaim - waitErr := wait.PollImmediate(resizePollInterval, pvcConditionSyncPeriod, func() (bool, error) { + waitErr := wait.PollImmediateWithContext(ctx, resizePollInterval, pvcConditionSyncPeriod, func(ctx context.Context) (bool, error) { var err error - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err) @@ -402,11 +402,11 @@ func WaitForPendingFSResizeCondition(pvc *v1.PersistentVolumeClaim, c clientset. } // WaitForFSResize waits for the filesystem in the pv to be resized -func WaitForFSResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) { +func WaitForFSResize(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) { var updatedPVC *v1.PersistentVolumeClaim - waitErr := wait.PollImmediate(resizePollInterval, totalResizeWaitPeriod, func() (bool, error) { + waitErr := wait.PollImmediateWithContext(ctx, resizePollInterval, totalResizeWaitPeriod, func(ctx context.Context) (bool, error) { var err error - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err) diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index cac598b79fb..a8530e85a64 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -113,34 +113,34 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt f := framework.NewFrameworkWithCustomTimeouts("volumeio", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { l = local{} // Now do the more expensive test initialization. 
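The volume_expand.go hunks above all follow the same shape: wait.PollImmediate plus context.TODO() becomes wait.PollImmediateWithContext plus the caller's ctx, so both the poll loop and the API call inside it stop as soon as the spec is aborted. A minimal sketch of that shape, assuming the k8s.io/apimachinery and k8s.io/client-go packages already used in this tree; the helper name waitForPVCBound and the 2-second interval are illustrative, not taken from the patch.

package example

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPVCBound polls until the named PVC reports phase Bound. Because the
// poll loop and the Get call both take ctx, an aborted spec stops the wait
// immediately instead of running out the full timeout.
func waitForPVCBound(ctx context.Context, cs clientset.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		pvc, err := cs.CoreV1().PersistentVolumeClaims(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("error fetching pvc %q: %w", name, err)
		}
		return pvc.Status.Phase == v1.ClaimBound, nil
	})
}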
- l.config = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) + l.config = driver.PrepareTest(ctx, f) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) if l.resource.VolSource == nil { e2eskipper.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) } } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error if l.resource != nil { - errs = append(errs, l.resource.CleanupResource()) + errs = append(errs, l.resource.CleanupResource(ctx)) l.resource = nil } framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") - l.migrationCheck.validateMigrationVolumeOpCounts() + l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) cs := f.ClientSet @@ -154,7 +154,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt podSec := v1.PodSecurityContext{ FSGroup: fsGroup, } - err := testVolumeIO(f, cs, storageframework.ConvertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes) + err := testVolumeIO(ctx, f, cs, storageframework.ConvertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes) framework.ExpectNoError(err) }) } @@ -305,7 +305,7 @@ func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) { // Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize` // // bytes. 
-func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { +func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace)) writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value loopCnt := storageframework.MinFileSize / int64(len(writeBlk)) @@ -318,14 +318,14 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu ginkgo.By(fmt.Sprintf("starting %s", clientPod.Name)) podsNamespacer := cs.CoreV1().Pods(config.Namespace) - clientPod, err = podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{}) + clientPod, err = podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err) } ginkgo.DeferCleanup(func(ctx context.Context) { deleteFile(f, clientPod, ddInput) ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) - e := e2epod.DeletePodWithWait(cs, clientPod) + e := e2epod.DeletePodWithWait(ctx, cs, clientPod) if e != nil { framework.Logf("client pod failed to delete: %v", e) if err == nil { // delete err is returned if err is not set @@ -337,7 +337,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu } }) - err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart) if err != nil { return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err) } diff --git a/test/e2e/storage/testsuites/volume_stress.go b/test/e2e/storage/testsuites/volume_stress.go index a9e42fb65b0..c9b0328ec00 100644 --- a/test/e2e/storage/testsuites/volume_stress.go +++ b/test/e2e/storage/testsuites/volume_stress.go @@ -112,22 +112,22 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, f := framework.NewFrameworkWithCustomTimeouts("stress", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { cs = f.ClientSet l = &volumeStressTest{} // Now do the more expensive test initialization. 
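As in testVolumeIO above, the spec bodies in these suites now receive their context directly from Ginkgo v2 (ginkgo.It(..., func(ctx context.Context) {...})), and cleanup closures registered with ginkgo.DeferCleanup accept a context of their own, which Ginkgo supplies even after the spec context has been cancelled. A compile-level sketch of that wiring, assuming only ginkgo/v2 and client-go; the Describe text, the namespace, and the package-level clientset variable are placeholders.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// cs is assumed to be initialized by the suite's framework setup before the
// spec runs; it stands in for f.ClientSet.
var cs clientset.Interface

var _ = ginkgo.Describe("context plumbing", func() {
	ginkgo.It("lists pods with the spec context", func(ctx context.Context) {
		// ctx is cancelled when the spec is interrupted or times out, so the
		// call below returns promptly instead of leaving the run hanging.
		_, err := cs.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
		if err != nil {
			ginkgo.Fail(err.Error())
		}

		// Cleanup callbacks receive their own context from Ginkgo, even if the
		// spec context above has already been cancelled by then.
		ginkgo.DeferCleanup(func(ctx context.Context) {
			_ = cs.CoreV1().Pods("default").DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
		})
	})
})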
- l.config = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) + l.config = driver.PrepareTest(ctx, f) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) l.volumes = []*storageframework.VolumeResource{} l.pods = []*v1.Pod{} l.testOptions = *dInfo.StressTestOptions } - createPodsAndVolumes := func() { + createPodsAndVolumes := func(ctx context.Context) { for i := 0; i < l.testOptions.NumPods; i++ { framework.Logf("Creating resources for pod %v/%v", i, l.testOptions.NumPods-1) - r := storageframework.CreateVolumeResource(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange) + r := storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange) l.volumes = append(l.volumes, r) podConfig := e2epod.Config{ NS: f.Namespace.Name, @@ -141,7 +141,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, } } - cleanup := func() { + cleanup := func(ctx context.Context) { framework.Logf("Stopping and waiting for all test routines to finish") l.wg.Wait() @@ -158,7 +158,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, defer wg.Done() framework.Logf("Deleting pod %v", pod.Name) - err := e2epod.DeletePodWithWait(cs, pod) + err := e2epod.DeletePodWithWait(ctx, cs, pod) mu.Lock() defer mu.Unlock() errs = append(errs, err) @@ -173,7 +173,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, defer wg.Done() framework.Logf("Deleting volume %s", volume.Pvc.GetName()) - err := volume.CleanupResource() + err := volume.CleanupResource(ctx) mu.Lock() defer mu.Unlock() errs = append(errs, err) @@ -182,13 +182,13 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, wg.Wait() framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") - l.migrationCheck.validateMigrationVolumeOpCounts() + l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(cleanup) - createPodsAndVolumes() + createPodsAndVolumes(ctx) // Restart pod repeatedly for i := 0; i < l.testOptions.NumPods; i++ { podIndex := i @@ -211,19 +211,19 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, default: pod := l.pods[podIndex] framework.Logf("Pod-%v [%v], Iteration %v/%v", podIndex, pod.Name, j, l.testOptions.NumRestarts-1) - _, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err := cs.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { framework.Failf("Failed to create pod-%v [%+v]. Error: %v", podIndex, pod, err) } - err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStart) if err != nil { framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err) } // TODO: write data per pod and validate it every time - err = e2epod.DeletePodWithWait(f.ClientSet, pod) + err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) if err != nil { framework.Failf("Failed to delete pod-%v [%+v]. 
Error: %v", podIndex, pod, err) } diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index e18906c12e5..90864927989 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -136,21 +136,21 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, l.ns = f.Namespace l.cs = f.ClientSet - l.config = driver.PrepareTest(f) + l.config = driver.PrepareTest(ctx, f) ginkgo.By("Picking a node") // Some CSI drivers are deployed to a single node (e.g csi-hostpath), // so we use that node instead of picking a random one. nodeName := l.config.ClientNodeSelection.Name if nodeName == "" { - node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) nodeName = node.Name } framework.Logf("Selected node %s", nodeName) ginkgo.By("Checking node limits") - limit, err := getNodeLimits(l.cs, l.config, nodeName, driverInfo) + limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo) framework.ExpectNoError(err) framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name) @@ -160,7 +160,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, dDriver) - l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) ginkgo.DeferCleanup(l.resource.CleanupResource) ginkgo.DeferCleanup(cleanupTest, l.cs, l.ns.Name, l.podNames, l.pvcNames, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete) @@ -183,7 +183,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, ClaimSize: claimSize, StorageClassName: &l.resource.Sc.Name, }, l.ns.Name) - pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) l.pvcNames = append(l.pvcNames, pvc.Name) pvcs = append(pvcs, pvc) @@ -198,18 +198,18 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, } pod, err := e2epod.MakeSecPod(&podConfig) framework.ExpectNoError(err) - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) l.podNames = append(l.podNames, pod.Name) } ginkgo.By("Waiting for all PVCs to get Bound") - l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.ns.Name, l.pvcNames) + l.pvNames, err = waitForAllPVCsBound(ctx, l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.ns.Name, l.pvcNames) framework.ExpectNoError(err) ginkgo.By("Waiting for the pod(s) running") for _, podName := range l.podNames { - err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, podName, l.ns.Name, testSlowMultiplier*f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, podName, l.ns.Name, testSlowMultiplier*f.Timeouts.PodStart) framework.ExpectNoError(err) } @@ -218,7 +218,7 @@ func (t 
*volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, l.podNames = append(l.podNames, pod.Name) ginkgo.By("Waiting for the pod to get unschedulable with the right message") - err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, pod.Name, "Unschedulable", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) { + err = e2epod.WaitForPodCondition(ctx, l.cs, l.ns.Name, pod.Name, "Unschedulable", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase == v1.PodPending { reg, err := regexp.Compile(`max.+volume.+count`) if err != nil { @@ -248,7 +248,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, l.ns = f.Namespace l.cs = f.ClientSet - l.config = driver.PrepareTest(f) + l.config = driver.PrepareTest(ctx, f) nodeNames := []string{} if l.config.ClientNodeSelection.Name != "" { @@ -256,7 +256,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, // so we check that node instead of checking all of them nodeNames = append(nodeNames, l.config.ClientNodeSelection.Name) } else { - nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) for _, node := range nodeList.Items { nodeNames = append(nodeNames, node.Name) @@ -265,7 +265,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, for _, nodeName := range nodeNames { ginkgo.By("Checking csinode limits") - _, err := getNodeLimits(l.cs, l.config, nodeName, driverInfo) + _, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo) if err != nil { framework.Failf("Expected volume limits to be set, error: %v", err) } @@ -273,16 +273,16 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, }) } -func cleanupTest(cs clientset.Interface, ns string, podNames, pvcNames []string, pvNames sets.String, timeout time.Duration) error { +func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podNames, pvcNames []string, pvNames sets.String, timeout time.Duration) error { var cleanupErrors []string for _, podName := range podNames { - err := cs.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{}) + err := cs.CoreV1().Pods(ns).Delete(ctx, podName, metav1.DeleteOptions{}) if err != nil { cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete pod %s: %s", podName, err)) } } for _, pvcName := range pvcNames { - err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{}) + err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{}) if !apierrors.IsNotFound(err) { cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete PVC %s: %s", pvcName, err)) } @@ -293,7 +293,7 @@ func cleanupTest(cs clientset.Interface, ns string, podNames, pvcNames []string, err := wait.Poll(5*time.Second, timeout, func() (bool, error) { existing := 0 for _, pvName := range pvNames.UnsortedList() { - _, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + _, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err == nil { existing++ } else { @@ -320,12 +320,12 @@ func cleanupTest(cs clientset.Interface, ns string, podNames, pvcNames []string, } // waitForAllPVCsBound waits until the given PVCs are all bound. It then returns the bound PVC names as a set. 
-func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, ns string, pvcNames []string) (sets.String, error) { +func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout time.Duration, ns string, pvcNames []string) (sets.String, error) { pvNames := sets.NewString() err := wait.Poll(5*time.Second, timeout, func() (bool, error) { unbound := 0 for _, pvcName := range pvcNames { - pvc, err := cs.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{}) + pvc, err := cs.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { return false, err } @@ -347,15 +347,15 @@ func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, ns strin return pvNames, nil } -func getNodeLimits(cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { +func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { if len(driverInfo.InTreePluginName) == 0 { - return getCSINodeLimits(cs, config, nodeName, driverInfo) + return getCSINodeLimits(ctx, cs, config, nodeName, driverInfo) } - return getInTreeNodeLimits(cs, nodeName, driverInfo) + return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo) } -func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { - node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { + node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return 0, err } @@ -381,11 +381,11 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *st return int(limit.Value()), nil } -func getCSINodeLimits(cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { +func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { // Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything. var limit int err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) { - csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { framework.Logf("%s", err) return false, nil diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index f6d120c2ccd..6c52a1953ee 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -110,19 +110,19 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa f := framework.NewFrameworkWithCustomTimeouts("volumemode", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { l = local{} l.ns = f.Namespace l.cs = f.ClientSet // Now do the more expensive test initialization. 
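Calls such as ginkgo.DeferCleanup(cleanupTest, l.cs, l.ns.Name, ...) above rely on DeferCleanup accepting a context-first function together with its trailing arguments: Ginkgo fills in the leading context itself when the cleanup actually runs. A small sketch of that form, assuming ginkgo/v2 and client-go; deletePod and registerPodCleanup are illustrative names, not helpers from this patch.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// deletePod is a ctx-first helper in the style of cleanupTest above.
func deletePod(ctx context.Context, cs clientset.Interface, ns, name string) error {
	return cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
}

// registerPodCleanup hands DeferCleanup the helper plus only its trailing
// arguments; Ginkgo supplies the leading context at cleanup time, which is
// why DeferCleanup(cleanupTest, l.cs, ...) needs no explicit ctx argument.
func registerPodCleanup(cs clientset.Interface, ns, name string) {
	ginkgo.DeferCleanup(deletePod, cs, ns, name)
}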
- l.config = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) + l.config = driver.PrepareTest(ctx, f) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) } // manualInit initializes l.VolumeResource without creating the PV & PVC objects. - manualInit := func() { - init() + manualInit := func(ctx context.Context) { + init(ctx) fsType := pattern.FsType volBindMode := storagev1.VolumeBindingImmediate @@ -139,7 +139,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa } // Create volume for pre-provisioned volume tests - l.Volume = storageframework.CreateVolume(driver, l.config, pattern.VolType) + l.Volume = storageframework.CreateVolume(ctx, driver, l.config, pattern.VolType) switch pattern.VolType { case storageframework.PreprovisionedPV: @@ -161,7 +161,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa } case storageframework.DynamicPV: if dDriver, ok := driver.(storageframework.DynamicPVTestDriver); ok { - l.Sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType) + l.Sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, fsType) if l.Sc == nil { e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) } @@ -182,13 +182,13 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa } } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error - errs = append(errs, l.CleanupResource()) + errs = append(errs, l.CleanupResource(ctx)) errs = append(errs, storageutils.TryFunc(l.driverCleanup)) l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") - l.migrationCheck.validateMigrationVolumeOpCounts() + l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } // We register different tests depending on the drive @@ -197,25 +197,25 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa case storageframework.PreprovisionedPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func(ctx context.Context) { - manualInit() + manualInit(ctx) ginkgo.DeferCleanup(cleanup) var err error ginkgo.By("Creating sc") - l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc, metav1.CreateOptions{}) + l.Sc, err = l.cs.StorageV1().StorageClasses().Create(ctx, l.Sc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create sc") ginkgo.By("Creating pv and pvc") - l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(context.TODO(), l.Pv, metav1.CreateOptions{}) + l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(ctx, l.Pv, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pv") // Prebind pv l.Pvc.Spec.VolumeName = l.Pv.Name - l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{}) + l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(ctx, l.Pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pvc") - framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, f.Timeouts, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc") + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, l.cs, f.Timeouts, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc") ginkgo.By("Creating pod") podConfig := e2epod.Config{ @@ -228,10 +228,10 @@ func (t 
*volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa pod, err := e2epod.MakeSecPod(&podConfig) framework.ExpectNoError(err, "Failed to create pod") - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pod") defer func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod") + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, l.cs, pod), "Failed to delete pod") }() eventSelector := fields.Set{ @@ -242,14 +242,14 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa }.AsSelector().String() msg := "Unable to attach or mount volumes" - err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart) + err = e2eevents.WaitTimeoutForEvent(ctx, l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart) // Events are unreliable, don't depend on the event. It's used only to speed up the test. if err != nil { framework.Logf("Warning: did not get event about FailedMountVolume") } // Check the pod is still not running - p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)") framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending") }) @@ -258,17 +258,17 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa case storageframework.DynamicPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func(ctx context.Context) { - manualInit() + manualInit(ctx) ginkgo.DeferCleanup(cleanup) var err error ginkgo.By("Creating sc") - l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc, metav1.CreateOptions{}) + l.Sc, err = l.cs.StorageV1().StorageClasses().Create(ctx, l.Sc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create sc") ginkgo.By("Creating pv and pvc") - l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{}) + l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(ctx, l.Pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pvc") eventSelector := fields.Set{ @@ -280,14 +280,14 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa // The error message is different for each storage driver msg := "" - err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision) + err = e2eevents.WaitTimeoutForEvent(ctx, l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision) // Events are unreliable, don't depend on the event. It's used only to speed up the test. 
if err != nil { framework.Logf("Warning: did not get event about provisioning failed") } // Check the pvc is still pending - pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(context.TODO(), l.Pvc.Name, metav1.GetOptions{}) + pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(ctx, l.Pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to re-read the pvc after event (or timeout)") framework.ExpectEqual(pvc.Status.Phase, v1.ClaimPending, "PVC phase isn't pending") }) @@ -298,9 +298,9 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func(ctx context.Context) { skipTestIfBlockNotSupported(driver) - init() + init(ctx) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.VolumeResource = *storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) ginkgo.DeferCleanup(cleanup) ginkgo.By("Creating pod") @@ -318,10 +318,10 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa pod = swapVolumeMode(pod) // Run the pod - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pod") defer func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod") + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, l.cs, pod), "Failed to delete pod") }() ginkgo.By("Waiting for the pod to fail") @@ -339,14 +339,14 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa } else { msg = "has volumeMode Filesystem, but is specified in volumeDevices" } - err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart) + err = e2eevents.WaitTimeoutForEvent(ctx, l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart) // Events are unreliable, don't depend on them. They're used only to speed up the test. 
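The volumemode.go hunks above treat the event wait as best effort but still bound it with the spec context. The same check can be sketched against the core client directly; the helper name, the interval, and the substring match below are assumptions, and the involvedObject keys are simply the standard event field selectors, mirroring the eventSelector built above.

package example

import (
	"context"
	"strings"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodEventMessage polls the Events API for an event on the named pod
// whose message contains msg. The ctx bounds both the poll loop and each List.
func waitForPodEventMessage(ctx context.Context, cs clientset.Interface, ns, pod, msg string, timeout time.Duration) error {
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.name":      pod,
		"involvedObject.namespace": ns,
	}.AsSelector().String()
	return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		events, err := cs.CoreV1().Events(ns).List(ctx, metav1.ListOptions{FieldSelector: selector})
		if err != nil {
			return false, err
		}
		for _, e := range events.Items {
			if strings.Contains(e.Message, msg) {
				return true, nil
			}
		}
		return false, nil
	})
}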
if err != nil { framework.Logf("Warning: did not get event about mismatched volume use") } // Check the pod is still not running - p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)") framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending") }) @@ -355,9 +355,9 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa if pattern.VolMode == v1.PersistentVolumeBlock { skipTestIfBlockNotSupported(driver) } - init() + init(ctx) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.VolumeResource = *storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) ginkgo.DeferCleanup(cleanup) ginkgo.By("Creating pod") @@ -377,26 +377,26 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa } // Run the pod - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, l.cs, pod)) }() - err = e2epod.WaitForPodNameRunningInNamespace(l.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(ctx, l.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err) // Reload the pod to get its node - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pod.Spec.NodeName, "", "pod should be scheduled to a node") - node, err := l.cs.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) + node, err := l.cs.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Listing mounted volumes in the pod") hostExec := storageutils.NewHostExec(f) ginkgo.DeferCleanup(hostExec.Cleanup) - volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node) + volumePaths, devicePaths, err := listPodVolumePluginDirectory(ctx, hostExec, pod, node) framework.ExpectNoError(err) driverInfo := driver.GetDriverInfo() @@ -477,24 +477,24 @@ func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod { // // /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt // /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0 -func listPodVolumePluginDirectory(h storageutils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) { +func listPodVolumePluginDirectory(ctx context.Context, h storageutils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) { mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes") devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices") - mounts, err = listPodDirectory(h, mountPath, node) + mounts, err = listPodDirectory(ctx, h, mountPath, node) if err != nil { return nil, nil, err } - devices, err = 
listPodDirectory(h, devicePath, node) + devices, err = listPodDirectory(ctx, h, devicePath, node) if err != nil { return nil, nil, err } return mounts, devices, nil } -func listPodDirectory(h storageutils.HostExec, path string, node *v1.Node) ([]string, error) { +func listPodDirectory(ctx context.Context, h storageutils.HostExec, path string, node *v1.Node) ([]string, error) { // Return no error if the directory does not exist (e.g. there are no block volumes used) - _, err := h.IssueCommandWithResult("test ! -d "+path, node) + _, err := h.IssueCommandWithResult(ctx, "test ! -d "+path, node) if err == nil { // The directory does not exist return nil, nil @@ -504,7 +504,7 @@ func listPodDirectory(h storageutils.HostExec, path string, node *v1.Node) ([]st // Inside /var/lib/kubelet/pods//volumes, look for /, hence depth 2 cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path) - out, err := h.IssueCommandWithResult(cmd, node) + out, err := h.IssueCommandWithResult(ctx, cmd, node) if err != nil { return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, node.Name, err) } diff --git a/test/e2e/storage/testsuites/volumeperf.go b/test/e2e/storage/testsuites/volumeperf.go index a5a93bddea3..528c30a6e19 100644 --- a/test/e2e/storage/testsuites/volumeperf.go +++ b/test/e2e/storage/testsuites/volumeperf.go @@ -128,18 +128,18 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri f := framework.NewFramework("volume-lifecycle-performance", frameworkOptions, nil) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Closing informer channel") close(l.stopCh) ginkgo.By("Deleting all PVCs") for _, pvc := range l.pvcs { - err := e2epv.DeletePersistentVolumeClaim(l.cs, pvc.Name, pvc.Namespace) + err := e2epv.DeletePersistentVolumeClaim(ctx, l.cs, pvc.Name, pvc.Namespace) framework.ExpectNoError(err) - err = e2epv.WaitForPersistentVolumeDeleted(l.cs, pvc.Spec.VolumeName, 1*time.Second, 5*time.Minute) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, l.cs, pvc.Spec.VolumeName, 1*time.Second, 5*time.Minute) framework.ExpectNoError(err) } ginkgo.By(fmt.Sprintf("Deleting Storage Class %s", l.scName)) - err := l.cs.StorageV1().StorageClasses().Delete(context.TODO(), l.scName, metav1.DeleteOptions{}) + err := l.cs.StorageV1().StorageClasses().Delete(ctx, l.scName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -149,7 +149,7 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri ns: f.Namespace, options: dInfo.PerformanceTestOptions, } - l.config = driver.PrepareTest(f) + l.config = driver.PrepareTest(ctx, f) // Stats for volume provisioning operation // TODO: Add stats for attach, resize and snapshot @@ -158,21 +158,21 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri perObjectInterval: make(map[string]*interval), operationMetrics: &storageframework.Metrics{}, } - sc := driver.(storageframework.DynamicPVTestDriver).GetDynamicProvisionStorageClass(l.config, pattern.FsType) + sc := driver.(storageframework.DynamicPVTestDriver).GetDynamicProvisionStorageClass(ctx, l.config, pattern.FsType) ginkgo.By(fmt.Sprintf("Creating Storage Class %v", sc)) // TODO: Add support for WaitForFirstConsumer volume binding mode if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { e2eskipper.Skipf("WaitForFirstConsumer binding mode currently not supported for 
performance tests") } ginkgo.By(fmt.Sprintf("Creating Storage Class %s", sc.Name)) - sc, err := l.cs.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) + sc, err := l.cs.StorageV1().StorageClasses().Create(ctx, sc, metav1.CreateOptions{}) framework.ExpectNoError(err) l.scName = sc.Name // Create a controller to watch on PVCs // When all PVCs provisioned by this test are in the Bound state, the controller // sends a signal to the channel - controller := newPVCWatch(f, l.options.ProvisioningOptions.Count, provisioningStats) + controller := newPVCWatch(ctx, f, l.options.ProvisioningOptions.Count, provisioningStats) l.stopCh = make(chan struct{}) go controller.Run(l.stopCh) waitForProvisionCh = make(chan []*v1.PersistentVolumeClaim) @@ -183,7 +183,7 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri ClaimSize: l.options.ProvisioningOptions.VolumeSize, StorageClassName: &sc.Name, }, l.ns.Name) - pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) // Store create time for each PVC provisioningStats.mutex.Lock() @@ -256,7 +256,7 @@ func validatePerformanceStats(operationMetrics *storageframework.Metrics, baseli // newPVCWatch creates an informer to check whether all PVCs are Bound // When all PVCs are bound, the controller sends a signal to // waitForProvisionCh to unblock the test -func newPVCWatch(f *framework.Framework, provisionCount int, pvcMetrics *performanceStats) cache.Controller { +func newPVCWatch(ctx context.Context, f *framework.Framework, provisionCount int, pvcMetrics *performanceStats) cache.Controller { defer ginkgo.GinkgoRecover() count := 0 countLock := &sync.Mutex{} @@ -290,11 +290,11 @@ func newPVCWatch(f *framework.Framework, provisionCount int, pvcMetrics *perform _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - obj, err := f.ClientSet.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), metav1.ListOptions{}) + obj, err := f.ClientSet.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{}) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return f.ClientSet.CoreV1().PersistentVolumeClaims(ns).Watch(context.TODO(), metav1.ListOptions{}) + return f.ClientSet.CoreV1().PersistentVolumeClaims(ns).Watch(ctx, metav1.ListOptions{}) }, }, &v1.PersistentVolumeClaim{}, diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index 6cdc6f353b9..ffefc1e7720 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -132,32 +132,32 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte f := framework.NewFrameworkWithCustomTimeouts("volume", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { l = local{} // Now do the more expensive test initialization. 
- l.config = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) + l.config = driver.PrepareTest(ctx, f) + l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange) if l.resource.VolSource == nil { e2eskipper.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) } } - cleanup := func() { + cleanup := func(ctx context.Context) { var errs []error if l.resource != nil { - errs = append(errs, l.resource.CleanupResource()) + errs = append(errs, l.resource.CleanupResource(ctx)) l.resource = nil } framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") - l.migrationCheck.validateMigrationVolumeOpCounts() + l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } ginkgo.It("should store data", func(ctx context.Context) { - init() + init(ctx) ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, storageframework.ConvertTestConfig(l.config)) ginkgo.DeferCleanup(cleanup) @@ -181,9 +181,9 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte // local), plugin skips setting fsGroup if volume is already mounted // and we don't have reliable way to detect volumes are unmounted or // not before starting the second pod. - e2evolume.InjectContent(f, config, fsGroup, pattern.FsType, tests) + e2evolume.InjectContent(ctx, f, config, fsGroup, pattern.FsType, tests) if driver.GetDriverInfo().Capabilities[storageframework.CapPersistence] { - e2evolume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests) + e2evolume.TestVolumeClient(ctx, f, config, fsGroup, pattern.FsType, tests) } else { ginkgo.By("Skipping persistence check for non-persistent volume") } @@ -193,15 +193,16 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte if pattern.VolMode != v1.PersistentVolumeBlock { ginkgo.It("should allow exec of files on the volume", func(ctx context.Context) { skipExecTest(driver) - init() + init(ctx) ginkgo.DeferCleanup(cleanup) - testScriptInPod(f, string(pattern.VolType), l.resource.VolSource, l.config) + testScriptInPod(ctx, f, string(pattern.VolType), l.resource.VolSource, l.config) }) } } func testScriptInPod( + ctx context.Context, f *framework.Framework, volumeType string, source *v1.VolumeSource, @@ -250,10 +251,10 @@ func testScriptInPod( } e2epod.SetNodeSelection(&pod.Spec, config.ClientNodeSelection) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) - e2eoutput.TestContainerOutput(f, "exec-volume-test", pod, 0, []string{fileName}) + e2eoutput.TestContainerOutput(ctx, f, "exec-volume-test", pod, 0, []string{fileName}) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) - err := e2epod.DeletePodWithWait(f.ClientSet, pod) + err := e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) framework.ExpectNoError(err, "while deleting pod") } diff --git a/test/e2e/storage/ubernetes_lite_volumes.go b/test/e2e/storage/ubernetes_lite_volumes.go index 4ce3d61317a..ca8c8dffbbd 100644 --- a/test/e2e/storage/ubernetes_lite_volumes.go +++ b/test/e2e/storage/ubernetes_lite_volumes.go @@ -42,10 +42,10 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() { var zoneCount int var err error image := framework.ServeHostnameImage - 
ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") if zoneCount <= 0 { - zoneCount, err = getZoneCount(f.ClientSet) + zoneCount, err = getZoneCount(ctx, f.ClientSet) framework.ExpectNoError(err) } ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount)) @@ -54,13 +54,13 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() { // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread }) ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func(ctx context.Context) { - PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image) + PodsUseStaticPVsOrFail(ctx, f, (2*zoneCount)+1, image) }) }) // Return the number of zones in which we have nodes in this cluster. -func getZoneCount(c clientset.Interface) (int, error) { - zoneNames, err := e2enode.GetSchedulableClusterZones(c) +func getZoneCount(ctx context.Context, c clientset.Interface) (int, error) { + zoneNames, err := e2enode.GetSchedulableClusterZones(ctx, c) if err != nil { return -1, err } @@ -76,12 +76,12 @@ type staticPVTestConfig struct { // PodsUseStaticPVsOrFail Check that the pods using statically // created PVs get scheduled to the same zone that the PV is in. -func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) { +func PodsUseStaticPVsOrFail(ctx context.Context, f *framework.Framework, podCount int, image string) { var err error c := f.ClientSet ns := f.Namespace.Name - zones, err := e2enode.GetSchedulableClusterZones(c) + zones, err := e2enode.GetSchedulableClusterZones(ctx, c) framework.ExpectNoError(err) zonelist := zones.List() ginkgo.By("Creating static PVs across zones") @@ -93,7 +93,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By("Cleaning up pods and PVs") for _, config := range configs { - e2epod.DeletePodOrFail(c, ns, config.pod.Name) + e2epod.DeletePodOrFail(ctx, c, ns, config.pod.Name) } var wg sync.WaitGroup wg.Add(len(configs)) @@ -101,11 +101,11 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) go func(config *staticPVTestConfig) { defer ginkgo.GinkgoRecover() defer wg.Done() - err := e2epod.WaitForPodToDisappear(c, ns, config.pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete) + err := e2epod.WaitForPodToDisappear(ctx, c, ns, config.pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete) framework.ExpectNoError(err, "while waiting for pod to disappear") - errs := e2epv.PVPVCCleanup(c, ns, config.pv, config.pvc) + errs := e2epv.PVPVCCleanup(ctx, c, ns, config.pv, config.pvc) framework.ExpectNoError(utilerrors.NewAggregate(errs), "while cleaning up PVs and PVCs") - err = e2epv.DeletePVSource(config.pvSource) + err = e2epv.DeletePVSource(ctx, config.pvSource) framework.ExpectNoError(err, "while deleting PVSource") }(configs[i]) } @@ -114,7 +114,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) for i, config := range configs { zone := zonelist[i%len(zones)] - config.pvSource, err = e2epv.CreatePVSource(zone) + config.pvSource, err = e2epv.CreatePVSource(ctx, zone) framework.ExpectNoError(err) pvConfig := e2epv.PersistentVolumeConfig{ @@ -125,25 +125,25 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) className := "" pvcConfig := e2epv.PersistentVolumeClaimConfig{StorageClassName: &className} - config.pv, config.pvc, err = 
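The DeferCleanup block in PodsUseStaticPVsOrFail fans cleanup out across goroutines, and every goroutine has to call ginkgo.GinkgoRecover and reuse the cleanup context. Reduced to its essentials; the names slice and the clientset c are assumptions for this example:

    ginkgo.DeferCleanup(func(ctx context.Context) {
        var wg sync.WaitGroup
        wg.Add(len(names))
        for _, name := range names {
            go func(name string) {
                defer ginkgo.GinkgoRecover() // required in every goroutine that may fail the test
                defer wg.Done()
                // The cleanup ctx keeps even teardown cancellable.
                err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, name, metav1.DeleteOptions{})
                if err != nil && !apierrors.IsNotFound(err) {
                    framework.Failf("deleting PVC %s: %v", name, err)
                }
            }(name)
        }
        wg.Wait()
    })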
e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true) + config.pv, config.pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) } ginkgo.By("Waiting for all PVCs to be bound") for _, config := range configs { - e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, config.pv, config.pvc) + e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, config.pv, config.pvc) } ginkgo.By("Creating pods for each static PV") for _, config := range configs { podConfig := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "") - config.pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), podConfig, metav1.CreateOptions{}) + config.pod, err = c.CoreV1().Pods(ns).Create(ctx, podConfig, metav1.CreateOptions{}) framework.ExpectNoError(err) } ginkgo.By("Waiting for all pods to be running") for _, config := range configs { - err = e2epod.WaitForPodRunningInNamespace(c, config.pod) + err = e2epod.WaitForPodRunningInNamespace(ctx, c, config.pod) framework.ExpectNoError(err) } } diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go index c7b24058e1f..8131908468e 100644 --- a/test/e2e/storage/utils/create.go +++ b/test/e2e/storage/utils/create.go @@ -141,7 +141,7 @@ func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ... // PatchItems has the some limitations as LoadFromManifests: // - only some common items are supported, unknown ones trigger an error // - only the latest stable API version for each item is supported -func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) error { +func CreateItems(ctx context.Context, f *framework.Framework, ns *v1.Namespace, items ...interface{}) error { var result error for _, item := range items { // Each factory knows which item(s) it supports, so try each one. @@ -151,7 +151,7 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) // description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item)) framework.Logf("creating %s", description) for _, factory := range factories { - destructor, err := factory.Create(f, ns, item) + destructor, err := factory.Create(ctx, f, ns, item) if destructor != nil { ginkgo.DeferCleanup(framework.IgnoreNotFound(destructor), framework.AnnotatedLocation(fmt.Sprintf("deleting %s", description))) } @@ -175,7 +175,7 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) // CreateFromManifests is a combination of LoadFromManifests, // PatchItems, patching with an optional custom function, // and CreateItems. -func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) error { +func CreateFromManifests(ctx context.Context, f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) error { items, err := LoadFromManifests(files...) if err != nil { return fmt.Errorf("CreateFromManifests: %w", err) @@ -190,7 +190,7 @@ func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, } } } - return CreateItems(f, driverNamespace, items...) + return CreateItems(ctx, f, driverNamespace, items...) } // What is a subset of metav1.TypeMeta which (in contrast to @@ -230,7 +230,7 @@ type ItemFactory interface { // error or a cleanup function for the created item. // If the item is of an unsupported type, it must return // an error that has errorItemNotSupported as cause. 
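The CreateItems loop above relies on ginkgo.DeferCleanup forwarding a context to the destructor that each factory returns, and on framework.IgnoreNotFound tolerating objects that are already gone. The registration step, reduced to an illustrative fragment; item, factory and description are placeholders from the surrounding loop, and error handling is simplified:

    destructor, err := factory.Create(ctx, f, ns, item)
    if destructor != nil {
        // The destructor has the shape func(ctx context.Context) error, so
        // DeferCleanup hands it a cleanup context; IgnoreNotFound turns
        // "already deleted" into success, and AnnotatedLocation records the
        // call site for failure reports.
        ginkgo.DeferCleanup(framework.IgnoreNotFound(destructor),
            framework.AnnotatedLocation(fmt.Sprintf("deleting %s", description)))
    }
    if err != nil {
        return fmt.Errorf("creating %s: %w", description, err)
    }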
- Create(f *framework.Framework, ns *v1.Namespace, item interface{}) (func() error, error) + Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, item interface{}) (func(ctx context.Context) error, error) } // describeItem always returns a string that describes the item, @@ -389,17 +389,17 @@ func (f *serviceAccountFactory) New() runtime.Object { return &v1.ServiceAccount{} } -func (*serviceAccountFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*serviceAccountFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*v1.ServiceAccount) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.CoreV1().ServiceAccounts(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create ServiceAccount: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -409,7 +409,7 @@ func (f *clusterRoleFactory) New() runtime.Object { return &rbacv1.ClusterRole{} } -func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*clusterRoleFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*rbacv1.ClusterRole) if !ok { return nil, errorItemNotSupported @@ -417,11 +417,11 @@ func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i in framework.Logf("Define cluster role %v", item.GetName()) client := f.ClientSet.RbacV1().ClusterRoles() - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create ClusterRole: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -431,18 +431,18 @@ func (f *clusterRoleBindingFactory) New() runtime.Object { return &rbacv1.ClusterRoleBinding{} } -func (*clusterRoleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*clusterRoleBindingFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*rbacv1.ClusterRoleBinding) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.RbacV1().ClusterRoleBindings() - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create ClusterRoleBinding: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -452,18 +452,18 @@ func (f *roleFactory) New() runtime.Object { return &rbacv1.Role{} } -func (*roleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, 
error) { +func (*roleFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*rbacv1.Role) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.RbacV1().Roles(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create Role: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -473,18 +473,18 @@ func (f *roleBindingFactory) New() runtime.Object { return &rbacv1.RoleBinding{} } -func (*roleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*roleBindingFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*rbacv1.RoleBinding) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.RbacV1().RoleBindings(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create RoleBinding: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -494,18 +494,18 @@ func (f *serviceFactory) New() runtime.Object { return &v1.Service{} } -func (*serviceFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*serviceFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*v1.Service) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.CoreV1().Services(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create Service: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -515,18 +515,18 @@ func (f *statefulSetFactory) New() runtime.Object { return &appsv1.StatefulSet{} } -func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*statefulSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*appsv1.StatefulSet) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.AppsV1().StatefulSets(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create StatefulSet: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ 
-536,18 +536,18 @@ func (f *deploymentFactory) New() runtime.Object { return &appsv1.Deployment{} } -func (*deploymentFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*deploymentFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*appsv1.Deployment) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.AppsV1().Deployments(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create Deployment: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -557,18 +557,18 @@ func (f *daemonSetFactory) New() runtime.Object { return &appsv1.DaemonSet{} } -func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*daemonSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*appsv1.DaemonSet) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.AppsV1().DaemonSets(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create DaemonSet: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -578,18 +578,18 @@ func (f *replicaSetFactory) New() runtime.Object { return &appsv1.ReplicaSet{} } -func (*replicaSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*replicaSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*appsv1.ReplicaSet) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.AppsV1().ReplicaSets(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create ReplicaSet: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -599,18 +599,18 @@ func (f *storageClassFactory) New() runtime.Object { return &storagev1.StorageClass{} } -func (*storageClassFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*storageClassFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*storagev1.StorageClass) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.StorageV1().StorageClasses() - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, 
fmt.Errorf("create StorageClass: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -620,18 +620,18 @@ func (f *csiDriverFactory) New() runtime.Object { return &storagev1.CSIDriver{} } -func (*csiDriverFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*csiDriverFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*storagev1.CSIDriver) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.StorageV1().CSIDrivers() - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create CSIDriver: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -641,18 +641,18 @@ func (f *secretFactory) New() runtime.Object { return &v1.Secret{} } -func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*secretFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { item, ok := i.(*v1.Secret) if !ok { return nil, errorItemNotSupported } client := f.ClientSet.CoreV1().Secrets(ns.Name) - if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil { + if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create Secret: %w", err) } - return func() error { - return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -662,7 +662,7 @@ func (f *customResourceDefinitionFactory) New() runtime.Object { return &apiextensionsv1.CustomResourceDefinition{} } -func (*customResourceDefinitionFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) { +func (*customResourceDefinitionFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { var err error unstructCRD := &unstructured.Unstructured{} gvr := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"} @@ -677,11 +677,11 @@ func (*customResourceDefinitionFactory) Create(f *framework.Framework, ns *v1.Na return nil, err } - if _, err = f.DynamicClient.Resource(gvr).Create(context.TODO(), unstructCRD, metav1.CreateOptions{}); err != nil { + if _, err = f.DynamicClient.Resource(gvr).Create(ctx, unstructCRD, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create CustomResourceDefinition: %w", err) } - return func() error { - return f.DynamicClient.Resource(gvr).Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) + return func(ctx context.Context) error { + return f.DynamicClient.Resource(gvr).Delete(ctx, item.GetName(), metav1.DeleteOptions{}) }, nil } diff --git a/test/e2e/storage/utils/host_exec.go b/test/e2e/storage/utils/host_exec.go index edefee588b6..31a89e267d4 100644 --- 
a/test/e2e/storage/utils/host_exec.go +++ b/test/e2e/storage/utils/host_exec.go @@ -47,10 +47,10 @@ func LogResult(result Result) { // HostExec represents interface we require to execute commands on remote host. type HostExec interface { - Execute(cmd string, node *v1.Node) (Result, error) - IssueCommandWithResult(cmd string, node *v1.Node) (string, error) - IssueCommand(cmd string, node *v1.Node) error - Cleanup() + Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error) + IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error) + IssueCommand(ctx context.Context, cmd string, node *v1.Node) error + Cleanup(ctx context.Context) } // hostExecutor implements HostExec @@ -69,7 +69,7 @@ func NewHostExec(framework *framework.Framework) HostExec { // launchNodeExecPod launches a hostexec pod for local PV and waits // until it's Running. -func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod { +func (h *hostExecutor) launchNodeExecPod(ctx context.Context, node string) *v1.Pod { f := h.Framework cs := f.ClientSet ns := f.Namespace @@ -104,9 +104,9 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod { return &privileged }(true), } - pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{}) + pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, hostExecPod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err) return pod } @@ -115,8 +115,8 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod { // performing the remote command execution, the stdout, stderr and exit code // are returned. // This works like ssh.SSH(...) utility. -func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) { - result, err := h.exec(cmd, node) +func (h *hostExecutor) Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error) { + result, err := h.exec(ctx, cmd, node) if codeExitErr, ok := err.(exec.CodeExitError); ok { // extract the exit code of remote command and silence the command // non-zero exit code error @@ -126,14 +126,14 @@ func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) { return result, err } -func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) { +func (h *hostExecutor) exec(ctx context.Context, cmd string, node *v1.Node) (Result, error) { result := Result{ Host: node.Name, Cmd: cmd, } pod, ok := h.nodeExecPods[node.Name] if !ok { - pod = h.launchNodeExecPod(node.Name) + pod = h.launchNodeExecPod(ctx, node.Name) if pod == nil { return result, fmt.Errorf("failed to create hostexec pod for node %q", node) } @@ -165,8 +165,8 @@ func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) { // IssueCommandWithResult issues command on the given node and returns stdout as // result. It returns error if there are some issues executing the command or // the command exits non-zero. 
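With the HostExec interface now context-aware, call sites pass the test ctx through every command and can register Cleanup directly with DeferCleanup. A usage sketch against the post-patch signatures; the node-lookup helper and the storageutils package alias are assumptions:

    hostExec := storageutils.NewHostExec(f)
    // Cleanup now takes a context, so Ginkgo can pass its cleanup ctx to it.
    ginkgo.DeferCleanup(hostExec.Cleanup)

    node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
    framework.ExpectNoError(err)

    out, err := hostExec.IssueCommandWithResult(ctx, "uname -a", node)
    framework.ExpectNoError(err)
    framework.Logf("node %s reports: %s", node.Name, out)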
-func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string, error) { - result, err := h.exec(cmd, node) +func (h *hostExecutor) IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error) { + result, err := h.exec(ctx, cmd, node) if err != nil { LogResult(result) } @@ -174,17 +174,17 @@ func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string } // IssueCommand works like IssueCommandWithResult, but discards result. -func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error { - _, err := h.IssueCommandWithResult(cmd, node) +func (h *hostExecutor) IssueCommand(ctx context.Context, cmd string, node *v1.Node) error { + _, err := h.IssueCommandWithResult(ctx, cmd, node) return err } // Cleanup cleanup resources it created during test. // Note that in most cases it is not necessary to call this because we create // pods under test namespace which will be destroyed in teardown phase. -func (h *hostExecutor) Cleanup() { +func (h *hostExecutor) Cleanup(ctx context.Context) { for _, pod := range h.nodeExecPods { - e2epod.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name) + e2epod.DeletePodOrFail(ctx, h.Framework.ClientSet, pod.Namespace, pod.Name) } h.nodeExecPods = make(map[string]*v1.Pod) } diff --git a/test/e2e/storage/utils/local.go b/test/e2e/storage/utils/local.go index ef3e8188872..df96eac124e 100644 --- a/test/e2e/storage/utils/local.go +++ b/test/e2e/storage/utils/local.go @@ -21,6 +21,7 @@ package utils */ import ( + "context" "fmt" "path/filepath" "strings" @@ -69,9 +70,9 @@ type LocalTestResource struct { // LocalTestResourceManager represents interface to create/destroy local test resources on node type LocalTestResourceManager interface { - Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource - ExpandBlockDevice(ltr *LocalTestResource, mbToAdd int) error - Remove(ltr *LocalTestResource) + Create(ctx context.Context, node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource + ExpandBlockDevice(ctx context.Context, ltr *LocalTestResource, mbToAdd int) error + Remove(ctx context.Context, ltr *LocalTestResource) } // ltrMgr implements LocalTestResourceManager @@ -98,10 +99,10 @@ func (l *ltrMgr) getTestDir() string { return filepath.Join(l.hostBase, testDirName) } -func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource { +func (l *ltrMgr) setupLocalVolumeTmpfs(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource { hostDir := l.getTestDir() ginkgo.By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir)) - err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node) + err := l.hostExec.IssueCommand(ctx, fmt.Sprintf("mkdir -p %q && mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node) framework.ExpectNoError(err) return &LocalTestResource{ Node: node, @@ -109,18 +110,18 @@ func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]stri } } -func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) { +func (l *ltrMgr) cleanupLocalVolumeTmpfs(ctx context.Context, ltr *LocalTestResource) { ginkgo.By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path)) - err := l.hostExec.IssueCommand(fmt.Sprintf("umount %q", ltr.Path), ltr.Node) + err := 
l.hostExec.IssueCommand(ctx, fmt.Sprintf("umount %q", ltr.Path), ltr.Node) framework.ExpectNoError(err) ginkgo.By("Removing the test directory") - err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node) + err = l.hostExec.IssueCommand(ctx, fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node) framework.ExpectNoError(err) } // createAndSetupLoopDevice creates an empty file and associates a loop devie with it. -func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) { +func (l *ltrMgr) createAndSetupLoopDevice(ctx context.Context, dir string, node *v1.Node, size int) { ginkgo.By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir)) mkdirCmd := fmt.Sprintf("mkdir -p %s", dir) count := size / 4096 @@ -130,22 +131,22 @@ func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) { } ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=4096 count=%d", dir, count) losetupCmd := fmt.Sprintf("losetup -f %s/file", dir) - err := l.hostExec.IssueCommand(fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node) + err := l.hostExec.IssueCommand(ctx, fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node) framework.ExpectNoError(err) } // findLoopDevice finds loop device path by its associated storage directory. -func (l *ltrMgr) findLoopDevice(dir string, node *v1.Node) string { +func (l *ltrMgr) findLoopDevice(ctx context.Context, dir string, node *v1.Node) string { cmd := fmt.Sprintf("E2E_LOOP_DEV=$(losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", dir) - loopDevResult, err := l.hostExec.IssueCommandWithResult(cmd, node) + loopDevResult, err := l.hostExec.IssueCommandWithResult(ctx, cmd, node) framework.ExpectNoError(err) return strings.TrimSpace(loopDevResult) } -func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]string) *LocalTestResource { +func (l *ltrMgr) setupLocalVolumeBlock(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource { loopDir := l.getTestDir() - l.createAndSetupLoopDevice(loopDir, node, 20*1024*1024) - loopDev := l.findLoopDevice(loopDir, node) + l.createAndSetupLoopDevice(ctx, loopDir, node, 20*1024*1024) + loopDev := l.findLoopDevice(ctx, loopDir, node) return &LocalTestResource{ Node: node, Path: loopDev, @@ -154,30 +155,30 @@ func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]stri } // teardownLoopDevice tears down loop device by its associated storage directory. 
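The loop-device helpers above reduce to three shell round-trips per volume, all funnelled through the context-aware IssueCommand/IssueCommandWithResult. Collapsed into one illustrative helper, written as if it lived in the same utils package; error handling is trimmed and the helper name is not part of the patch:

    func withLoopDevice(ctx context.Context, hostExec HostExec, node *v1.Node, dir string, sizeBytes int) (string, func(context.Context) error, error) {
        // 1. Back the device with a file and attach a free loop device to it.
        setup := fmt.Sprintf("mkdir -p %s && dd if=/dev/zero of=%s/file bs=4096 count=%d && losetup -f %s/file",
            dir, dir, sizeBytes/4096, dir)
        if err := hostExec.IssueCommand(ctx, setup, node); err != nil {
            return "", nil, err
        }
        // 2. Resolve which /dev/loopN was attached to our backing file.
        out, err := hostExec.IssueCommandWithResult(ctx,
            fmt.Sprintf("losetup | grep %s/file | awk '{ print $1 }'", dir), node)
        if err != nil {
            return "", nil, err
        }
        loopDev := strings.TrimSpace(out)
        // 3. Detach the device and remove the backing directory on cleanup.
        cleanup := func(ctx context.Context) error {
            return hostExec.IssueCommand(ctx, fmt.Sprintf("losetup -d %s && rm -r %s", loopDev, dir), node)
        }
        return loopDev, cleanup, nil
    }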
-func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) { - loopDev := l.findLoopDevice(dir, node) +func (l *ltrMgr) teardownLoopDevice(ctx context.Context, dir string, node *v1.Node) { + loopDev := l.findLoopDevice(ctx, dir, node) ginkgo.By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir)) losetupDeleteCmd := fmt.Sprintf("losetup -d %s", loopDev) - err := l.hostExec.IssueCommand(losetupDeleteCmd, node) + err := l.hostExec.IssueCommand(ctx, losetupDeleteCmd, node) framework.ExpectNoError(err) return } -func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) { - l.teardownLoopDevice(ltr.loopDir, ltr.Node) +func (l *ltrMgr) cleanupLocalVolumeBlock(ctx context.Context, ltr *LocalTestResource) { + l.teardownLoopDevice(ctx, ltr.loopDir, ltr.Node) ginkgo.By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir)) removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir) - err := l.hostExec.IssueCommand(removeCmd, ltr.Node) + err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node) framework.ExpectNoError(err) } -func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]string) *LocalTestResource { - ltr := l.setupLocalVolumeBlock(node, parameters) +func (l *ltrMgr) setupLocalVolumeBlockFS(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource { + ltr := l.setupLocalVolumeBlock(ctx, node, parameters) loopDev := ltr.Path loopDir := ltr.loopDir // Format and mount at loopDir and give others rwx for read/write testing cmd := fmt.Sprintf("mkfs -t ext4 %s && mount -t ext4 %s %s && chmod o+rwx %s", loopDev, loopDev, loopDir, loopDir) - err := l.hostExec.IssueCommand(cmd, node) + err := l.hostExec.IssueCommand(ctx, cmd, node) framework.ExpectNoError(err) return &LocalTestResource{ Node: node, @@ -186,17 +187,17 @@ func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]st } } -func (l *ltrMgr) cleanupLocalVolumeBlockFS(ltr *LocalTestResource) { +func (l *ltrMgr) cleanupLocalVolumeBlockFS(ctx context.Context, ltr *LocalTestResource) { umountCmd := fmt.Sprintf("umount %s", ltr.Path) - err := l.hostExec.IssueCommand(umountCmd, ltr.Node) + err := l.hostExec.IssueCommand(ctx, umountCmd, ltr.Node) framework.ExpectNoError(err) - l.cleanupLocalVolumeBlock(ltr) + l.cleanupLocalVolumeBlock(ctx, ltr) } -func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]string) *LocalTestResource { +func (l *ltrMgr) setupLocalVolumeDirectory(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource { hostDir := l.getTestDir() mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir) - err := l.hostExec.IssueCommand(mkdirCmd, node) + err := l.hostExec.IssueCommand(ctx, mkdirCmd, node) framework.ExpectNoError(err) return &LocalTestResource{ Node: node, @@ -204,18 +205,18 @@ func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string] } } -func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) { +func (l *ltrMgr) cleanupLocalVolumeDirectory(ctx context.Context, ltr *LocalTestResource) { ginkgo.By("Removing the test directory") removeCmd := fmt.Sprintf("rm -r %s", ltr.Path) - err := l.hostExec.IssueCommand(removeCmd, ltr.Node) + err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node) framework.ExpectNoError(err) } -func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[string]string) *LocalTestResource { +func (l *ltrMgr) setupLocalVolumeDirectoryLink(ctx context.Context, node *v1.Node, parameters 
map[string]string) *LocalTestResource { hostDir := l.getTestDir() hostDirBackend := hostDir + "-backend" cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir) - err := l.hostExec.IssueCommand(cmd, node) + err := l.hostExec.IssueCommand(ctx, cmd, node) framework.ExpectNoError(err) return &LocalTestResource{ Node: node, @@ -223,19 +224,19 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[str } } -func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) { +func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ctx context.Context, ltr *LocalTestResource) { ginkgo.By("Removing the test directory") hostDir := ltr.Path hostDirBackend := hostDir + "-backend" removeCmd := fmt.Sprintf("rm -r %s && rm -r %s", hostDir, hostDirBackend) - err := l.hostExec.IssueCommand(removeCmd, ltr.Node) + err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node) framework.ExpectNoError(err) } -func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource { +func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource { hostDir := l.getTestDir() cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s", hostDir, hostDir, hostDir) - err := l.hostExec.IssueCommand(cmd, node) + err := l.hostExec.IssueCommand(ctx, cmd, node) framework.ExpectNoError(err) return &LocalTestResource{ Node: node, @@ -243,19 +244,19 @@ func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters } } -func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) { +func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ctx context.Context, ltr *LocalTestResource) { ginkgo.By("Removing the test directory") hostDir := ltr.Path removeCmd := fmt.Sprintf("umount %s && rm -r %s", hostDir, hostDir) - err := l.hostExec.IssueCommand(removeCmd, ltr.Node) + err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node) framework.ExpectNoError(err) } -func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource { +func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource { hostDir := l.getTestDir() hostDirBackend := hostDir + "-backend" cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir) - err := l.hostExec.IssueCommand(cmd, node) + err := l.hostExec.IssueCommand(ctx, cmd, node) framework.ExpectNoError(err) return &LocalTestResource{ Node: node, @@ -263,17 +264,17 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, paramet } } -func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) { +func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ctx context.Context, ltr *LocalTestResource) { ginkgo.By("Removing the test directory") hostDir := ltr.Path hostDirBackend := hostDir + "-backend" removeCmd := fmt.Sprintf("rm %s && umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend) - err := l.hostExec.IssueCommand(removeCmd, ltr.Node) + err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node) framework.ExpectNoError(err) } -func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource { - res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node) +func 
(l *ltrMgr) setupLocalVolumeGCELocalSSD(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource { + res, err := l.hostExec.IssueCommandWithResult(ctx, "ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node) framework.ExpectNoError(err) dirName := strings.Fields(res)[0] hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName @@ -283,47 +284,47 @@ func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[strin } } -func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) { +func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ctx context.Context, ltr *LocalTestResource) { // This filesystem is attached in cluster initialization, we clean all files to make it reusable. removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path) - err := l.hostExec.IssueCommand(removeCmd, ltr.Node) + err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node) framework.ExpectNoError(err) } -func (l *ltrMgr) expandLocalVolumeBlockFS(ltr *LocalTestResource, mbToAdd int) error { +func (l *ltrMgr) expandLocalVolumeBlockFS(ctx context.Context, ltr *LocalTestResource, mbToAdd int) error { ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file conv=notrunc oflag=append bs=1M count=%d", ltr.loopDir, mbToAdd) - loopDev := l.findLoopDevice(ltr.loopDir, ltr.Node) + loopDev := l.findLoopDevice(ctx, ltr.loopDir, ltr.Node) losetupCmd := fmt.Sprintf("losetup -c %s", loopDev) - return l.hostExec.IssueCommand(fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node) + return l.hostExec.IssueCommand(ctx, fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node) } -func (l *ltrMgr) ExpandBlockDevice(ltr *LocalTestResource, mbtoAdd int) error { +func (l *ltrMgr) ExpandBlockDevice(ctx context.Context, ltr *LocalTestResource, mbtoAdd int) error { switch ltr.VolumeType { case LocalVolumeBlockFS: - return l.expandLocalVolumeBlockFS(ltr, mbtoAdd) + return l.expandLocalVolumeBlockFS(ctx, ltr, mbtoAdd) } return fmt.Errorf("Failed to expand local test resource, unsupported volume type: %s", ltr.VolumeType) } -func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource { +func (l *ltrMgr) Create(ctx context.Context, node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource { var ltr *LocalTestResource switch volumeType { case LocalVolumeDirectory: - ltr = l.setupLocalVolumeDirectory(node, parameters) + ltr = l.setupLocalVolumeDirectory(ctx, node, parameters) case LocalVolumeDirectoryLink: - ltr = l.setupLocalVolumeDirectoryLink(node, parameters) + ltr = l.setupLocalVolumeDirectoryLink(ctx, node, parameters) case LocalVolumeDirectoryBindMounted: - ltr = l.setupLocalVolumeDirectoryBindMounted(node, parameters) + ltr = l.setupLocalVolumeDirectoryBindMounted(ctx, node, parameters) case LocalVolumeDirectoryLinkBindMounted: - ltr = l.setupLocalVolumeDirectoryLinkBindMounted(node, parameters) + ltr = l.setupLocalVolumeDirectoryLinkBindMounted(ctx, node, parameters) case LocalVolumeTmpfs: - ltr = l.setupLocalVolumeTmpfs(node, parameters) + ltr = l.setupLocalVolumeTmpfs(ctx, node, parameters) case LocalVolumeBlock: - ltr = l.setupLocalVolumeBlock(node, parameters) + ltr = l.setupLocalVolumeBlock(ctx, node, parameters) case LocalVolumeBlockFS: - ltr = l.setupLocalVolumeBlockFS(node, parameters) + ltr = l.setupLocalVolumeBlockFS(ctx, node, parameters) case LocalVolumeGCELocalSSD: - ltr = l.setupLocalVolumeGCELocalSSD(node, parameters) + ltr = 
l.setupLocalVolumeGCELocalSSD(ctx, node, parameters) default: framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType) return nil @@ -335,24 +336,24 @@ func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters ma return ltr } -func (l *ltrMgr) Remove(ltr *LocalTestResource) { +func (l *ltrMgr) Remove(ctx context.Context, ltr *LocalTestResource) { switch ltr.VolumeType { case LocalVolumeDirectory: - l.cleanupLocalVolumeDirectory(ltr) + l.cleanupLocalVolumeDirectory(ctx, ltr) case LocalVolumeDirectoryLink: - l.cleanupLocalVolumeDirectoryLink(ltr) + l.cleanupLocalVolumeDirectoryLink(ctx, ltr) case LocalVolumeDirectoryBindMounted: - l.cleanupLocalVolumeDirectoryBindMounted(ltr) + l.cleanupLocalVolumeDirectoryBindMounted(ctx, ltr) case LocalVolumeDirectoryLinkBindMounted: - l.cleanupLocalVolumeDirectoryLinkBindMounted(ltr) + l.cleanupLocalVolumeDirectoryLinkBindMounted(ctx, ltr) case LocalVolumeTmpfs: - l.cleanupLocalVolumeTmpfs(ltr) + l.cleanupLocalVolumeTmpfs(ctx, ltr) case LocalVolumeBlock: - l.cleanupLocalVolumeBlock(ltr) + l.cleanupLocalVolumeBlock(ctx, ltr) case LocalVolumeBlockFS: - l.cleanupLocalVolumeBlockFS(ltr) + l.cleanupLocalVolumeBlockFS(ctx, ltr) case LocalVolumeGCELocalSSD: - l.cleanupLocalVolumeGCELocalSSD(ltr) + l.cleanupLocalVolumeGCELocalSSD(ctx, ltr) default: framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType) } diff --git a/test/e2e/storage/utils/pod.go b/test/e2e/storage/utils/pod.go index 0c9307d2777..b971bd152eb 100644 --- a/test/e2e/storage/utils/pod.go +++ b/test/e2e/storage/utils/pod.go @@ -43,8 +43,8 @@ import ( // // The output goes to log files (when using --report-dir, as in the // CI) or the output stream (otherwise). -func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() { - ctx, cancel := context.WithCancel(context.Background()) +func StartPodLogs(ctx context.Context, f *framework.Framework, driverNamespace *v1.Namespace) func() { + ctx, cancel := context.WithCancel(ctx) cs := f.ClientSet ns := driverNamespace.Name @@ -103,17 +103,17 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() // - If `systemctl` returns stderr "command not found, issues the command via `service` // - If `service` also returns stderr "command not found", the test is aborted. 
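StartPodLogs shows the second recurring pattern of this series: helpers that run in the background derive their cancellable context from the caller's ctx instead of context.Background(), so they stop either when the returned cancel function is called or when the test itself is aborted, whichever comes first. In general form, as a sketch:

    // startBackgroundWatcher runs work until stop() is called or the test's
    // ctx is cancelled, whichever happens first.
    func startBackgroundWatcher(ctx context.Context, work func(context.Context)) (stop func()) {
        // Deriving from ctx rather than context.Background() ties the
        // goroutine's lifetime to the test even if stop() is never called.
        ctx, cancel := context.WithCancel(ctx)
        go func() {
            defer ginkgo.GinkgoRecover()
            work(ctx)
        }()
        return cancel
    }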
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart` -func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { +func KubeletCommand(ctx context.Context, kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { command := "" systemctlPresent := false kubeletPid := "" - nodeIP, err := getHostAddress(c, pod) + nodeIP, err := getHostAddress(ctx, c, pod) framework.ExpectNoError(err) nodeIP = nodeIP + ":22" framework.Logf("Checking if systemctl command is present") - sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider) + sshResult, err := e2essh.SSH(ctx, "systemctl --version", nodeIP, framework.TestContext.Provider) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) if !strings.Contains(sshResult.Stderr, "command not found") { command = fmt.Sprintf("systemctl %s kubelet", string(kOp)) @@ -122,23 +122,23 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { command = fmt.Sprintf("service kubelet %s", string(kOp)) } - sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider) + sudoPresent := isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) if sudoPresent { command = fmt.Sprintf("sudo %s", command) } if kOp == KRestart { - kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) + kubeletPid = getKubeletMainPid(ctx, nodeIP, sudoPresent, systemctlPresent) } framework.Logf("Attempting `%s`", command) - sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider) + sshResult, err = e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) e2essh.LogResult(sshResult) gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) if kOp == KStop { - if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { + if ok := e2enode.WaitForNodeToBeNotReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok { framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) } } @@ -146,7 +146,10 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { // Wait for a minute to check if kubelet Pid is getting changed isPidChanged := false for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) { - kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) + if ctx.Err() != nil { + framework.Fail("timed out waiting for Kubelet POD change") + } + kubeletPidAfterRestart := getKubeletMainPid(ctx, nodeIP, sudoPresent, systemctlPresent) if kubeletPid != kubeletPidAfterRestart { isPidChanged = true break @@ -161,7 +164,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { } if kOp == KStart || kOp == KRestart { // For kubelet start and restart operations, Wait until Node becomes Ready - if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { + if ok := e2enode.WaitForNodeToBeReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok { framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) } } @@ -170,8 +173,8 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { // getHostAddress gets the node for a pod and returns the first // address. Returns an error if the node the pod is on doesn't have an // address. 
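The new ctx.Err() check in KubeletCommand is what makes the hand-rolled wait loop abort promptly instead of sleeping out its full minute after an interrupt. The shape of that loop, extracted; conditionMet is a placeholder for "the kubelet PID changed":

    deadline := time.Now().Add(1 * time.Minute)
    for time.Now().Before(deadline) {
        if ctx.Err() != nil {
            framework.Failf("aborted while waiting for condition: %v", ctx.Err())
        }
        if conditionMet() { // placeholder condition
            break
        }
        time.Sleep(2 * time.Second)
    }

Selecting on ctx.Done() instead of sleeping would react a couple of seconds faster, but the plain ctx.Err() check keeps the diff minimal.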
-func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) { - node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{}) +func getHostAddress(ctx context.Context, client clientset.Interface, p *v1.Pod) (string, error) { + node, err := client.CoreV1().Nodes().Get(ctx, p.Spec.NodeName, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/test/e2e/storage/utils/snapshot.go b/test/e2e/storage/utils/snapshot.go index aff9e20efb3..a721bd1dc64 100644 --- a/test/e2e/storage/utils/snapshot.go +++ b/test/e2e/storage/utils/snapshot.go @@ -48,11 +48,11 @@ var ( ) // WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first. -func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error { +func WaitForSnapshotReady(ctx context.Context, c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName) if successful := WaitUntil(poll, timeout, func() bool { - snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{}) + snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(ctx, snapshotName, metav1.GetOptions{}) if err != nil { framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err) return false @@ -80,12 +80,12 @@ func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, p // GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a // given VolumeSnapshot -func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured { +func GetSnapshotContentFromSnapshot(ctx context.Context, dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured { defer ginkgo.GinkgoRecover() - err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout) + err := WaitForSnapshotReady(ctx, dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout) framework.ExpectNoError(err) - vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{}) + vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(ctx, snapshot.GetName(), metav1.GetOptions{}) snapshotStatus := vs.Object["status"].(map[string]interface{}) snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string) @@ -93,7 +93,7 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured framework.Logf("snapshotContentName %s", snapshotContentName) framework.ExpectNoError(err) - vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{}) + vscontent, err := dc.Resource(SnapshotContentGVR).Get(ctx, snapshotContentName, metav1.GetOptions{}) framework.ExpectNoError(err) return vscontent @@ -101,9 +101,9 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured } // DeleteSnapshotWithoutWaiting deletes a VolumeSnapshot and return directly without waiting -func DeleteSnapshotWithoutWaiting(dc dynamic.Interface, ns string, snapshotName string) error { +func DeleteSnapshotWithoutWaiting(ctx context.Context, dc dynamic.Interface, ns string, snapshotName string) error { 
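Because snapshots are read through the dynamic client, their status comes back as nested map[string]interface{} values; the code above uses direct type assertions, but the same lookup can be written defensively with unstructured.NestedString. An illustrative variant, not part of the patch, written as if inside the same utils package:

    // boundContentName returns the name of the VolumeSnapshotContent bound to a snapshot.
    func boundContentName(ctx context.Context, dc dynamic.Interface, ns, snapshotName string) (string, error) {
        vs, err := dc.Resource(SnapshotGVR).Namespace(ns).Get(ctx, snapshotName, metav1.GetOptions{})
        if err != nil {
            return "", err
        }
        name, found, err := unstructured.NestedString(vs.Object, "status", "boundVolumeSnapshotContentName")
        if err != nil || !found {
            return "", fmt.Errorf("snapshot %s/%s is not bound to a content object yet", ns, snapshotName)
        }
        return name, nil
    }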
ginkgo.By("deleting the snapshot") - err := dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{}) + err := dc.Resource(SnapshotGVR).Namespace(ns).Delete(ctx, snapshotName, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -111,15 +111,15 @@ func DeleteSnapshotWithoutWaiting(dc dynamic.Interface, ns string, snapshotName } // DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first -func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error { +func DeleteAndWaitSnapshot(ctx context.Context, dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error { var err error - err = DeleteSnapshotWithoutWaiting(dc, ns, snapshotName) + err = DeleteSnapshotWithoutWaiting(ctx, dc, ns, snapshotName) if err != nil { return err } ginkgo.By("checking the Snapshot has been deleted") - err = WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout) + err = WaitForNamespacedGVRDeletion(ctx, dc, SnapshotGVR, ns, snapshotName, poll, timeout) return err } diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 13d8dbf5d8e..2c4fc261f5b 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -75,7 +75,7 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string } // getKubeletMainPid return the Main PID of the Kubelet Process -func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string { +func getKubeletMainPid(ctx context.Context, nodeIP string, sudoPresent bool, systemctlPresent bool) string { command := "" if systemctlPresent { command = "systemctl status kubelet | grep 'Main PID'" @@ -86,7 +86,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s command = fmt.Sprintf("sudo %s", command) } framework.Logf("Attempting `%s`", command) - sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider) + sshResult, err := e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP)) e2essh.LogResult(sshResult) gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID") @@ -95,7 +95,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s } // TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts -func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) { +func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) { byteLen := 64 seed := time.Now().UTC().UnixNano() @@ -103,7 +103,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed) ginkgo.By("Restarting kubelet") - KubeletCommand(KRestart, c, clientPod) + KubeletCommand(ctx, KRestart, c, clientPod) ginkgo.By("Testing that written file is accessible.") CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed) @@ -112,7 +112,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra } // TestKubeletRestartsAndRestoresMap tests that a 
volume mapped to a pod remains mapped after a kubelet restarts -func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) { +func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) { byteLen := 64 seed := time.Now().UTC().UnixNano() @@ -120,7 +120,7 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed) ginkgo.By("Restarting kubelet") - KubeletCommand(KRestart, c, clientPod) + KubeletCommand(ctx, KRestart, c, clientPod) ginkgo.By("Testing that written pv is accessible.") CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed) @@ -132,20 +132,20 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame // forceDelete is true indicating whether the pod is forcefully deleted. // checkSubpath is true indicating whether the subpath should be checked. // If secondPod is set, it is started when kubelet is down to check that the volume is usable while the old pod is being deleted and the new pod is starting. -func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) { - nodeIP, err := getHostAddress(c, clientPod) +func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) { + nodeIP, err := getHostAddress(ctx, c, clientPod) framework.ExpectNoError(err) nodeIP = nodeIP + ":22" ginkgo.By("Expecting the volume mount to be found.") - result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) if checkSubpath { ginkgo.By("Expecting the volume subpath mount to be found.") - result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) @@ -159,7 +159,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f // This command is to make sure kubelet is started after test finishes no matter it fails or not. 
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod) ginkgo.By("Stopping the kubelet.") - KubeletCommand(KStop, c, clientPod) + KubeletCommand(ctx, KStop, c, clientPod) if secondPod != nil { ginkgo.By("Starting the second pod") @@ -169,15 +169,15 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) if forceDelete { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0)) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, *metav1.NewDeleteOptions(0)) } else { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{}) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, metav1.DeleteOptions{}) } framework.ExpectNoError(err) ginkgo.By("Starting the kubelet and waiting for pod to delete.") - KubeletCommand(KStart, c, clientPod) - err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete) + KubeletCommand(ctx, KStart, c, clientPod) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete) if err != nil { framework.ExpectNoError(err, "Expected pod to be not found.") } @@ -190,7 +190,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f if secondPod != nil { ginkgo.By("Waiting for the second pod.") - err = e2epod.WaitForPodRunningInNamespace(c, secondPod) + err = e2epod.WaitForPodRunningInNamespace(ctx, c, secondPod) framework.ExpectNoError(err, "while waiting for the second pod Running") ginkgo.By("Getting the second pod uuid.") @@ -198,7 +198,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f framework.ExpectNoError(err, "getting the second UID") ginkgo.By("Expecting the volume mount to be found in the second pod.") - result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.") framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) @@ -207,12 +207,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed) err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "when deleting the second pod") - err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete) framework.ExpectNoError(err, "when waiting for the second pod to disappear") } ginkgo.By("Expecting the volume mount not to be found.") - result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH 
error.") gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).") @@ -220,7 +220,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f if checkSubpath { ginkgo.By("Expecting the volume subpath mount not to be found.") - result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).") @@ -230,42 +230,42 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f } // TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down. -func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) { - TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil, volumePath) +func TestVolumeUnmountsFromDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) { + TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, false, false, nil, volumePath) } // TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down. -func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) { - TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil, volumePath) +func TestVolumeUnmountsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) { + TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, false, nil, volumePath) } // TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down. // forceDelete is true indicating whether the pod is forcefully deleted. -func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) { - nodeIP, err := getHostAddress(c, clientPod) +func TestVolumeUnmapsFromDeletedPodWithForceOption(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) { + nodeIP, err := getHostAddress(ctx, c, clientPod) framework.ExpectNoError(err, "Failed to get nodeIP.") nodeIP = nodeIP + ":22" // Creating command to check whether path exists podDirectoryCmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID) - if isSudoPresent(nodeIP, framework.TestContext.Provider) { + if isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) { podDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", podDirectoryCmd) } // Directories in the global directory have unpredictable names, however, device symlinks // have the same name as pod.UID. So just find anything with pod.UID name. 
globalBlockDirectoryCmd := fmt.Sprintf("find /var/lib/kubelet/plugins -name %s", clientPod.UID) - if isSudoPresent(nodeIP, framework.TestContext.Provider) { + if isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) { globalBlockDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalBlockDirectoryCmd) } ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.") - result, err := e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) ginkgo.By("Expecting the symlinks from global map path to be found.") - result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code)) @@ -273,19 +273,19 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra // This command is to make sure kubelet is started after test finishes no matter it fails or not. ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod) ginkgo.By("Stopping the kubelet.") - KubeletCommand(KStop, c, clientPod) + KubeletCommand(ctx, KStop, c, clientPod) ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) if forceDelete { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0)) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, *metav1.NewDeleteOptions(0)) } else { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{}) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, metav1.DeleteOptions{}) } framework.ExpectNoError(err, "Failed to delete pod.") ginkgo.By("Starting the kubelet and waiting for pod to delete.") - KubeletCommand(KStart, c, clientPod) - err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete) + KubeletCommand(ctx, KStart, c, clientPod) + err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete) framework.ExpectNoError(err, "Expected pod to be not found.") if forceDelete { @@ -295,13 +295,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra } ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.") - result, err = e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.") ginkgo.By("Expecting the symlinks from global map path not to be found.") - result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider) + result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected find stdout to be empty.") @@ -310,17 +310,17 @@ func 
TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra } // TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down. -func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) { - TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false, devicePath) +func TestVolumeUnmapsFromDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) { + TestVolumeUnmapsFromDeletedPodWithForceOption(ctx, c, f, clientPod, false, devicePath) } // TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down. -func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) { - TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true, devicePath) +func TestVolumeUnmapsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) { + TestVolumeUnmapsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, devicePath) } // RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. -func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) { +func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -358,14 +358,14 @@ func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, }, }, } - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pod: %v", err) ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name) - framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, pod.Namespace, t.PodStartSlow)) } // StartExternalProvisioner create external provisioner pod -func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod { +func StartExternalProvisioner(ctx context.Context, c clientset.Interface, ns string, externalPluginName string) *v1.Pod { podClient := c.CoreV1().Pods(ns) provisionerPod := &v1.Pod{ @@ -426,21 +426,21 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa }, }, } - provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{}) + provisionerPod, err := podClient.Create(ctx, provisionerPod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, c, provisionerPod)) ginkgo.By("locating the provisioner pod") - pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(ctx, provisionerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) return pod } -func isSudoPresent(nodeIP string, provider string) bool { +func isSudoPresent(ctx context.Context, 
nodeIP string, provider string) bool { framework.Logf("Checking if sudo command is present") - sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider) + sshResult, err := e2essh.SSH(ctx, "sudo --version", nodeIP, provider) framework.ExpectNoError(err, "SSH to %q errored.", nodeIP) if !strings.Contains(sshResult.Stderr, "command not found") { return true @@ -556,8 +556,8 @@ func GetSectorSize(f *framework.Framework, pod *v1.Pod, device string) int { } // findMountPoints returns all mount points on given node under specified directory. -func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string { - result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node) +func findMountPoints(ctx context.Context, hostExec HostExec, node *v1.Node, dir string) []string { + result, err := hostExec.IssueCommandWithResult(ctx, fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node) framework.ExpectNoError(err, "Encountered HostExec error.") var mountPoints []string if err != nil { @@ -572,16 +572,16 @@ func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string { } // FindVolumeGlobalMountPoints returns all volume global mount points on the node of given pod. -func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String { - return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...) +func FindVolumeGlobalMountPoints(ctx context.Context, hostExec HostExec, node *v1.Node) sets.String { + return sets.NewString(findMountPoints(ctx, hostExec, node, "/var/lib/kubelet/plugins")...) } // CreateDriverNamespace creates a namespace for CSI driver installation. // The namespace is still tracked and ensured that gets deleted when test terminates. 
-func CreateDriverNamespace(f *framework.Framework) *v1.Namespace { +func CreateDriverNamespace(ctx context.Context, f *framework.Framework) *v1.Namespace { ginkgo.By(fmt.Sprintf("Building a driver namespace object, basename %s", f.Namespace.Name)) // The driver namespace will be bound to the test namespace in the prefix - namespace, err := f.CreateNamespace(f.Namespace.Name, map[string]string{ + namespace, err := f.CreateNamespace(ctx, f.Namespace.Name, map[string]string{ "e2e-framework": f.BaseName, "e2e-test-namespace": f.Namespace.Name, }) @@ -589,7 +589,7 @@ func CreateDriverNamespace(f *framework.Framework) *v1.Namespace { if framework.TestContext.VerifyServiceAccount { ginkgo.By("Waiting for a default service account to be provisioned in namespace") - err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) + err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name) framework.ExpectNoError(err) } else { framework.Logf("Skipping waiting for service account") @@ -598,11 +598,11 @@ func CreateDriverNamespace(f *framework.Framework) *v1.Namespace { } // WaitForGVRDeletion waits until a non-namespaced object has been deleted -func WaitForGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error { +func WaitForGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName) if successful := WaitUntil(poll, timeout, func() bool { - _, err := c.Resource(gvr).Get(context.TODO(), objectName, metav1.GetOptions{}) + _, err := c.Resource(gvr).Get(ctx, objectName, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { framework.Logf("%s %v is not found and has been deleted", gvr.Resource, objectName) return true @@ -621,11 +621,11 @@ func WaitForGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, ob } // WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted -func WaitForNamespacedGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error { +func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName) if successful := WaitUntil(poll, timeout, func() bool { - _, err := c.Resource(gvr).Namespace(ns).Get(context.TODO(), objectName, metav1.GetOptions{}) + _, err := c.Resource(gvr).Namespace(ns).Get(ctx, objectName, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { framework.Logf("%s %s is not found in namespace %s and has been deleted", gvr.Resource, objectName, ns) return true @@ -645,6 +645,7 @@ func WaitForNamespacedGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionRe // WaitUntil runs checkDone until a timeout is reached func WaitUntil(poll, timeout time.Duration, checkDone func() bool) bool { + // TODO (pohly): replace with gomega.Eventually for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { if checkDone() { framework.Logf("WaitUntil finished successfully after %v", time.Since(start)) @@ -710,8 +711,8 @@ func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, } // DeleteStorageClass deletes the passed in StorageClass and catches 
errors other than "Not Found" -func DeleteStorageClass(cs clientset.Interface, className string) error { - err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{}) +func DeleteStorageClass(ctx context.Context, cs clientset.Interface, className string) error { + err := cs.StorageV1().StorageClasses().Delete(ctx, className, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return err } diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index ce9e7106068..fe7af214752 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -59,7 +59,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { f := framework.NewDefaultFramework("pv") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { c = f.ClientSet ns = f.Namespace.Name var err error @@ -68,8 +68,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // and the underlying storage driver and therefore don't pass // with other kinds of clusters and drivers. e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") - e2epv.SkipIfNoDefaultStorageClass(c) - defaultScName, err = e2epv.GetDefaultStorageClassName(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) + defaultScName, err = e2epv.GetDefaultStorageClassName(ctx, c) framework.ExpectNoError(err) test := testsuites.StorageClassTest{ @@ -91,64 +91,64 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { VolumeMode: &blockMode, }, ns) - metricsGrabber, err = e2emetrics.NewMetricsGrabber(c, nil, f.ClientConfig(), true, false, true, false, false, false) + metricsGrabber, err = e2emetrics.NewMetricsGrabber(ctx, c, nil, f.ClientConfig(), true, false, true, false, false, false) if err != nil { framework.Failf("Error creating metrics grabber : %v", err) } }) - ginkgo.AfterEach(func() { - newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + ginkgo.AfterEach(func(ctx context.Context) { + newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err) } else { - e2epv.DeletePersistentVolumeClaim(c, newPvc.Name, newPvc.Namespace) + e2epv.DeletePersistentVolumeClaim(ctx, c, newPvc.Name, newPvc.Namespace) if newPvc.Spec.VolumeName != "" { - err = e2epv.WaitForPersistentVolumeDeleted(c, newPvc.Spec.VolumeName, 5*time.Second, 5*time.Minute) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, c, newPvc.Spec.VolumeName, 5*time.Second, 5*time.Minute) framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", newPvc.Spec.VolumeName) } } if invalidSc != nil { - err := c.StorageV1().StorageClasses().Delete(context.TODO(), invalidSc.Name, metav1.DeleteOptions{}) + err := c.StorageV1().StorageClasses().Delete(ctx, invalidSc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Error deleting storageclass %v: %v", invalidSc.Name, err) invalidSc = nil } }) - provisioning := func(ephemeral bool) { + provisioning := func(ctx context.Context, ephemeral bool) { if !metricsGrabber.HasControlPlanePods() { e2eskipper.Skipf("Environment does not support getting controller-manager metrics - skipping") } ginkgo.By("Getting plugin name") - defaultClass, err := c.StorageV1().StorageClasses().Get(context.TODO(), defaultScName, metav1.GetOptions{}) + defaultClass, err := 
c.StorageV1().StorageClasses().Get(ctx, defaultScName, metav1.GetOptions{}) framework.ExpectNoError(err, "Error getting default storageclass: %v", err) pluginName := defaultClass.Provisioner - controllerMetrics, err := metricsGrabber.GrabFromControllerManager() + controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) framework.ExpectNoError(err, "Error getting c-m metrics : %v", err) storageOpMetrics := getControllerStorageMetrics(controllerMetrics, pluginName) if !ephemeral { - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) } pod := makePod(ns, pvc, ephemeral) - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "Error starting pod %s", pod.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) - updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber, pluginName) + updatedStorageMetrics := waitForDetachAndGrabMetrics(ctx, storageOpMetrics, metricsGrabber, pluginName) framework.ExpectNotEqual(len(updatedStorageMetrics.latencyMetrics), 0, "Error fetching c-m updated storage metrics") framework.ExpectNotEqual(len(updatedStorageMetrics.statusMetrics), 0, "Error fetching c-m updated storage metrics") @@ -160,13 +160,13 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } } - provisioningError := func(ephemeral bool) { + provisioningError := func(ctx context.Context, ephemeral bool) { if !metricsGrabber.HasControlPlanePods() { e2eskipper.Skipf("Environment does not support getting controller-manager metrics - skipping") } ginkgo.By("Getting default storageclass") - defaultClass, err := c.StorageV1().StorageClasses().Get(context.TODO(), defaultScName, metav1.GetOptions{}) + defaultClass, err := c.StorageV1().StorageClasses().Get(ctx, defaultScName, metav1.GetOptions{}) framework.ExpectNoError(err, "Error getting default storageclass: %v", err) pluginName := defaultClass.Provisioner @@ -179,50 +179,50 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { "invalidparam": "invalidvalue", }, } - _, err = c.StorageV1().StorageClasses().Create(context.TODO(), invalidSc, metav1.CreateOptions{}) + _, err = c.StorageV1().StorageClasses().Create(ctx, invalidSc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating new storageclass: %v", err) pvc.Spec.StorageClassName = &invalidSc.Name if !ephemeral { - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name) framework.ExpectNotEqual(pvc, nil) } ginkgo.By("Creating a pod and expecting it to fail") pod := makePod(ns, pvc, ephemeral) - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, 
metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) - err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectError(err) framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) ginkgo.By("Checking failure metrics") - updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager() + updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) framework.ExpectNoError(err, "failed to get controller manager metrics") updatedStorageMetrics := getControllerStorageMetrics(updatedControllerMetrics, pluginName) framework.ExpectNotEqual(len(updatedStorageMetrics.statusMetrics), 0, "Error fetching c-m updated storage metrics") } - filesystemMode := func(isEphemeral bool) { + filesystemMode := func(ctx context.Context, isEphemeral bool) { if !isEphemeral { - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) } pod := makePod(ns, pvc, isEphemeral) - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pvcName := pvc.Name @@ -245,11 +245,11 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // Poll kubelet metrics waiting for the volume to be picked up // by the volume stats collector var kubeMetrics e2emetrics.KubeletMetrics - waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { + waitErr := wait.PollWithContext(ctx, 30*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) { framework.Logf("Grabbing Kubelet metrics") // Grab kubelet metrics from the node the pod was scheduled on var err error - kubeMetrics, err = metricsGrabber.GrabFromKubelet(pod.Spec.NodeName) + kubeMetrics, err = metricsGrabber.GrabFromKubelet(ctx, pod.Spec.NodeName) if err != nil { framework.Logf("Error fetching kubelet metrics") return false, err @@ -270,12 +270,12 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) } - blockmode := func(isEphemeral bool) { + blockmode := func(ctx context.Context, isEphemeral bool) { if !isEphemeral { - pvcBlock, err = c.CoreV1().PersistentVolumeClaims(pvcBlock.Namespace).Create(context.TODO(), pvcBlock, metav1.CreateOptions{}) + pvcBlock, err = c.CoreV1().PersistentVolumeClaims(pvcBlock.Namespace).Create(ctx, pvcBlock, metav1.CreateOptions{}) framework.ExpectNoError(err) 
framework.ExpectNotEqual(pvcBlock, nil) } @@ -286,13 +286,13 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { DevicePath: "/mnt/" + pvcBlock.Name, }} pod.Spec.Containers[0].VolumeMounts = nil - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Verify volume stat metrics were collected for the referenced PVC @@ -315,7 +315,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { framework.Logf("Grabbing Kubelet metrics") // Grab kubelet metrics from the node the pod was scheduled on var err error - kubeMetrics, err = metricsGrabber.GrabFromKubelet(pod.Spec.NodeName) + kubeMetrics, err = metricsGrabber.GrabFromKubelet(ctx, pod.Spec.NodeName) if err != nil { framework.Logf("Error fetching kubelet metrics") return false, err @@ -336,27 +336,27 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) } - totalTime := func(isEphemeral bool) { + totalTime := func(ctx context.Context, isEphemeral bool) { if !isEphemeral { - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) } pod := makePod(ns, pvc, isEphemeral) - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - controllerMetrics, err := metricsGrabber.GrabFromControllerManager() + controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) if err != nil { e2eskipper.Skipf("Could not get controller-manager metrics - skipping") } @@ -367,27 +367,27 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { framework.ExpectNoError(err, "Invalid metric in P/V Controller metrics: %q", metricKey) framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) } - volumeManager := func(isEphemeral bool) { + volumeManager := func(ctx context.Context, isEphemeral bool) { if !isEphemeral { - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = 
c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) } pod := makePod(ns, pvc, isEphemeral) - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - kubeMetrics, err := metricsGrabber.GrabFromKubelet(pod.Spec.NodeName) + kubeMetrics, err := metricsGrabber.GrabFromKubelet(ctx, pod.Spec.NodeName) framework.ExpectNoError(err) // Metrics should have dimensions plugin_name and state available @@ -397,12 +397,12 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { framework.ExpectNoError(err, "Invalid metric in Volume Manager metrics: %q", totalVolumesKey) framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) } - adController := func(isEphemeral bool) { + adController := func(ctx context.Context, isEphemeral bool) { if !isEphemeral { - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) } @@ -410,21 +410,21 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pod := makePod(ns, pvc, isEphemeral) // Get metrics - controllerMetrics, err := metricsGrabber.GrabFromControllerManager() + controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) if err != nil { e2eskipper.Skipf("Could not get controller-manager metrics - skipping") } // Create pod - pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Get updated metrics - updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager() + updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) if err != nil { e2eskipper.Skipf("Could not get controller-manager metrics - skipping") } @@ -440,7 +440,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { totalVolumesKey := "attachdetach_controller_total_volumes" states := []string{"actual_state_of_world", "desired_state_of_world"} dimensions := []string{"state", "plugin_name"} - waitForADControllerStatesMetrics(metricsGrabber, totalVolumesKey, dimensions, states) + waitForADControllerStatesMetrics(ctx, metricsGrabber, 
totalVolumesKey, dimensions, states) // Total number of volumes in both ActualStateofWorld and DesiredStateOfWorld // states should be higher or equal than it used to be @@ -459,32 +459,32 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) } testAll := func(isEphemeral bool) { ginkgo.It("should create prometheus metrics for volume provisioning and attach/detach", func(ctx context.Context) { - provisioning(isEphemeral) + provisioning(ctx, isEphemeral) }) // TODO(mauriciopoppe): after CSIMigration is turned on we're no longer reporting // the volume_provision metric (removed in #106609), issue to investigate the bug #106773 ginkgo.It("should create prometheus metrics for volume provisioning errors [Slow]", func(ctx context.Context) { - provisioningError(isEphemeral) + provisioningError(ctx, isEphemeral) }) ginkgo.It("should create volume metrics with the correct FilesystemMode PVC ref", func(ctx context.Context) { - filesystemMode(isEphemeral) + filesystemMode(ctx, isEphemeral) }) ginkgo.It("should create volume metrics with the correct BlockMode PVC ref", func(ctx context.Context) { - blockmode(isEphemeral) + blockmode(ctx, isEphemeral) }) ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func(ctx context.Context) { - totalTime(isEphemeral) + totalTime(ctx, isEphemeral) }) ginkgo.It("should create volume metrics in Volume Manager", func(ctx context.Context) { - volumeManager(isEphemeral) + volumeManager(ctx, isEphemeral) }) ginkgo.It("should create metrics for total number of volumes in A/D Controller", func(ctx context.Context) { - adController(isEphemeral) + adController(ctx, isEphemeral) }) } @@ -545,10 +545,10 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // validator used to validate each metric's values, the length of metricValues // should be 4, and the elements should be bound pv count, unbound pv count, bound // pvc count, unbound pvc count in turn. - validator := func(metricValues []map[string]int64) { + validator := func(ctx context.Context, metricValues []map[string]int64) { framework.ExpectEqual(len(metricValues), 4, "Wrong metric size: %d", len(metricValues)) - controllerMetrics, err := metricsGrabber.GrabFromControllerManager() + controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) for i, metric := range e2emetrics { @@ -566,7 +566,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } } - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { if !metricsGrabber.HasControlPlanePods() { e2eskipper.Skipf("Environment does not support getting controller-manager metrics - skipping") } @@ -575,7 +575,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns) // Initializes all original metric values. 
- controllerMetrics, err := metricsGrabber.GrabFromControllerManager() + controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) for _, metric := range e2emetrics { originMetricValues = append(originMetricValues, @@ -583,11 +583,11 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } }) - ginkgo.AfterEach(func() { - if err := e2epv.DeletePersistentVolume(c, pv.Name); err != nil { + ginkgo.AfterEach(func(ctx context.Context) { + if err := e2epv.DeletePersistentVolume(ctx, c, pv.Name); err != nil { framework.Failf("Error deleting pv: %v", err) } - if err := e2epv.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace); err != nil { + if err := e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, pvc.Namespace); err != nil { framework.Failf("Error deleting pvc: %v", err) } @@ -596,45 +596,45 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { }) ginkgo.It("should create none metrics for pvc controller before creating any PV or PVC", func(ctx context.Context) { - validator([]map[string]int64{nil, nil, nil, nil}) + validator(ctx, []map[string]int64{nil, nil, nil, nil}) }) ginkgo.It("should create unbound pv count metrics for pvc controller after creating pv only", - func() { + func(ctx context.Context) { var err error - pv, err = e2epv.CreatePV(c, f.Timeouts, pv) + pv, err = e2epv.CreatePV(ctx, c, f.Timeouts, pv) framework.ExpectNoError(err, "Error creating pv: %v", err) - waitForPVControllerSync(metricsGrabber, unboundPVKey, classKey) - validator([]map[string]int64{nil, {className: 1}, nil, nil}) + waitForPVControllerSync(ctx, metricsGrabber, unboundPVKey, classKey) + validator(ctx, []map[string]int64{nil, {className: 1}, nil, nil}) }) ginkgo.It("should create unbound pvc count metrics for pvc controller after creating pvc only", - func() { + func(ctx context.Context) { var err error - pvc, err = e2epv.CreatePVC(c, ns, pvc) + pvc, err = e2epv.CreatePVC(ctx, c, ns, pvc) framework.ExpectNoError(err, "Error creating pvc: %v", err) - waitForPVControllerSync(metricsGrabber, unboundPVCKey, namespaceKey) - validator([]map[string]int64{nil, nil, nil, {ns: 1}}) + waitForPVControllerSync(ctx, metricsGrabber, unboundPVCKey, namespaceKey) + validator(ctx, []map[string]int64{nil, nil, nil, {ns: 1}}) }) ginkgo.It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc", - func() { + func(ctx context.Context) { var err error - pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true) + pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err, "Error creating pv pvc: %v", err) - waitForPVControllerSync(metricsGrabber, boundPVKey, classKey) - waitForPVControllerSync(metricsGrabber, boundPVCKey, namespaceKey) - validator([]map[string]int64{{className: 1}, nil, {ns: 1}, nil}) + waitForPVControllerSync(ctx, metricsGrabber, boundPVKey, classKey) + waitForPVControllerSync(ctx, metricsGrabber, boundPVCKey, namespaceKey) + validator(ctx, []map[string]int64{{className: 1}, nil, {ns: 1}, nil}) }) ginkgo.It("should create total pv count metrics for with plugin and volume mode labels after creating pv", - func() { + func(ctx context.Context) { var err error dimensions := []string{pluginNameKey, volumeModeKey} - pv, err = e2epv.CreatePV(c, f.Timeouts, pv) + pv, err = e2epv.CreatePV(ctx, c, f.Timeouts, pv) framework.ExpectNoError(err, "Error creating pv: %v", err) - 
waitForPVControllerSync(metricsGrabber, totalPVKey, pluginNameKey) - controllerMetrics, err := metricsGrabber.GrabFromControllerManager() + waitForPVControllerSync(ctx, metricsGrabber, totalPVKey, pluginNameKey) + controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) err = testutil.ValidateMetrics(testutil.Metrics(controllerMetrics), totalPVKey, dimensions...) framework.ExpectNoError(err, "Invalid metric in Controller Manager metrics: %q", totalPVKey) @@ -660,7 +660,7 @@ func newStorageControllerMetrics() *storageControllerMetrics { } } -func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGrabber *e2emetrics.Grabber, pluginName string) *storageControllerMetrics { +func waitForDetachAndGrabMetrics(ctx context.Context, oldMetrics *storageControllerMetrics, metricsGrabber *e2emetrics.Grabber, pluginName string) *storageControllerMetrics { backoff := wait.Backoff{ Duration: 10 * time.Second, Factor: 1.2, @@ -674,7 +674,7 @@ func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGr } verifyMetricFunc := func() (bool, error) { - updatedMetrics, err := metricsGrabber.GrabFromControllerManager() + updatedMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) if err != nil { framework.Logf("Error fetching controller-manager metrics") @@ -698,7 +698,7 @@ func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGr return true, nil } - waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc) + waitErr := wait.ExponentialBackoffWithContext(ctx, backoff, verifyMetricFunc) framework.ExpectNoError(waitErr, "Unable to get updated metrics for plugin %s", pluginName) return updatedStorageMetrics } @@ -815,21 +815,21 @@ func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string } // Wait for the count of a pv controller's metric specified by metricName and dimension bigger than zero. 
-func waitForPVControllerSync(metricsGrabber *e2emetrics.Grabber, metricName, dimension string) { +func waitForPVControllerSync(ctx context.Context, metricsGrabber *e2emetrics.Grabber, metricName, dimension string) { backoff := wait.Backoff{ Duration: 10 * time.Second, Factor: 1.2, Steps: 21, } verifyMetricFunc := func() (bool, error) { - updatedMetrics, err := metricsGrabber.GrabFromControllerManager() + updatedMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) if err != nil { framework.Logf("Error fetching controller-manager metrics") return false, err } return len(testutil.GetMetricValuesForLabel(testutil.Metrics(updatedMetrics), metricName, dimension)) > 0, nil } - waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc) + waitErr := wait.ExponentialBackoffWithContext(ctx, backoff, verifyMetricFunc) framework.ExpectNoError(waitErr, "Unable to get pv controller metrics") } @@ -860,14 +860,14 @@ func getStatesMetrics(metricKey string, givenMetrics testutil.Metrics) map[strin return states } -func waitForADControllerStatesMetrics(metricsGrabber *e2emetrics.Grabber, metricName string, dimensions []string, stateNames []string) { +func waitForADControllerStatesMetrics(ctx context.Context, metricsGrabber *e2emetrics.Grabber, metricName string, dimensions []string, stateNames []string) { backoff := wait.Backoff{ Duration: 10 * time.Second, Factor: 1.2, Steps: 21, } verifyMetricFunc := func() (bool, error) { - updatedMetrics, err := metricsGrabber.GrabFromControllerManager() + updatedMetrics, err := metricsGrabber.GrabFromControllerManager(ctx) if err != nil { e2eskipper.Skipf("Could not get controller-manager metrics - skipping") return false, err @@ -884,7 +884,7 @@ func waitForADControllerStatesMetrics(metricsGrabber *e2emetrics.Grabber, metric } return true, nil } - waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc) + waitErr := wait.ExponentialBackoffWithContext(ctx, backoff, verifyMetricFunc) framework.ExpectNoError(waitErr, "Unable to get A/D controller metrics") } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 623bcbfba21..9242e3f1d15 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -161,11 +161,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Provisioner: "kubernetes.io/gce-pd", Parameters: map[string]string{ "type": "pd-ssd", - "zone": getRandomClusterZone(c), + "zone": getRandomClusterZone(ctx, c), }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") @@ -183,7 +183,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") @@ -199,11 +199,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Provisioner: "kubernetes.io/aws-ebs", Parameters: map[string]string{ "type": "gp2", - "zone": getRandomClusterZone(c), + "zone": getRandomClusterZone(ctx, c), }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(claim 
*v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") @@ -222,7 +222,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "3.5Gi", ExpectedSize: "4Gi", // 4 GiB is minimum for io1 - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") @@ -240,7 +240,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "500Gi", // minimum for sc1 ExpectedSize: "500Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") @@ -258,7 +258,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "500Gi", // minimum for st1 ExpectedSize: "500Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") @@ -276,7 +276,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1Gi", ExpectedSize: "1Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") @@ -293,7 +293,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Parameters: map[string]string{}, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) }, }, @@ -308,7 +308,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) }, }, @@ -321,7 +321,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Parameters: map[string]string{}, ClaimSize: "1.5Gi", ExpectedSize: "1.5Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) }, }, @@ -334,7 +334,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Parameters: map[string]string{}, ClaimSize: "1Gi", ExpectedSize: "1Gi", - PvCheck: func(claim *v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) }, }, @@ -386,7 +386,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1Gi", ExpectedSize: "1Gi", - PvCheck: func(claim 
*v1.PersistentVolumeClaim) { + PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) { volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") @@ -409,14 +409,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { pv := test.TestDynamicProvisioning(ctx) ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) - framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) + framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name)) - framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName)) + framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, pv.Spec.GCEPersistentDisk.PDName)) ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name)) - framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) - framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) + framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name) + framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 1*time.Second, 30*time.Second)) }) ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func(ctx context.Context) { @@ -438,7 +438,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } class := newStorageClass(test, ns, "race") - class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) + class, err := c.StorageV1().StorageClasses().Create(ctx, class, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteStorageClass, c, class.Name) @@ -451,13 +451,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { StorageClassName: &class.Name, VolumeMode: &test.VolumeMode, }, ns) - tmpClaim, err := e2epv.CreatePVC(c, ns, claim) + tmpClaim, err := e2epv.CreatePVC(ctx, c, ns, claim) framework.ExpectNoError(err) - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, tmpClaim.Name, ns)) } ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) - residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name) + residualPVs, err = waitForProvisionedVolumesDeleted(ctx, c, class.Name) // Cleanup the test resources before breaking ginkgo.DeferCleanup(deleteProvisionedVolumesAndDisks, c, residualPVs) framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs)) @@ -473,7 +473,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // is already deleted. 
e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") ginkgo.By("creating PD") - diskName, err := e2epv.CreatePDWithRetry() + diskName, err := e2epv.CreatePDWithRetry(ctx) framework.ExpectNoError(err) ginkgo.By("creating PV") @@ -510,26 +510,26 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, } } - pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("waiting for the PV to get Released") - err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, timeouts.PVReclaim) + err = e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased, c, pv.Name, 2*time.Second, timeouts.PVReclaim) framework.ExpectNoError(err) ginkgo.By("deleting the PD") - err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource) + err = e2epv.DeletePVSource(ctx, &pv.Spec.PersistentVolumeSource) framework.ExpectNoError(err) ginkgo.By("changing the PV reclaim policy") - pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete - pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) framework.ExpectNoError(err) ginkgo.By("waiting for the PV to get deleted") - err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PVDelete) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 5*time.Second, timeouts.PVDelete) framework.ExpectNoError(err) }) }) @@ -545,11 +545,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Name: serviceAccountName, } - err := e2eauth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject) + err := e2eauth.BindClusterRole(ctx, c.RbacV1(), "system:persistent-volume-provisioner", ns, subject) framework.ExpectNoError(err) roleName := "leader-locking-nfs-provisioner" - _, err = f.ClientSet.RbacV1().Roles(ns).Create(context.TODO(), &rbacv1.Role{ + _, err = f.ClientSet.RbacV1().Roles(ns).Create(ctx, &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, }, @@ -561,16 +561,16 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create leader-locking role") - err = e2eauth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject) + err = e2eauth.BindRoleInNamespace(ctx, c.RbacV1(), roleName, ns, subject) framework.ExpectNoError(err) - err = e2eauth.WaitForAuthorizationUpdate(c.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(ctx, c.AuthorizationV1(), serviceaccount.MakeUsername(ns, serviceAccountName), "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization") ginkgo.By("creating an external dynamic provisioner pod") - pod := utils.StartExternalProvisioner(c, ns, externalPluginName) + pod := utils.StartExternalProvisioner(ctx, c, ns, externalPluginName) ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name) ginkgo.By("creating a StorageClass") @@ -601,7 +601,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ginkgo.Describe("DynamicProvisioner Default", func() { ginkgo.It("should create and delete default 
persistent volumes [Slow]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) ginkgo.By("creating a claim with no annotation") test := testsuites.StorageClassTest{ @@ -625,9 +625,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // Modifying the default storage class can be disruptive to other tests that depend on it ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) - scName, scErr := e2epv.GetDefaultStorageClassName(c) + scName, scErr := e2epv.GetDefaultStorageClassName(ctx, c) framework.ExpectNoError(scErr) test := testsuites.StorageClassTest{ @@ -637,24 +637,24 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } ginkgo.By("setting the is-default StorageClass annotation to false") - verifyDefaultStorageClass(c, scName, true) + verifyDefaultStorageClass(ctx, c, scName, true) ginkgo.DeferCleanup(updateDefaultStorageClass, c, scName, "true") - updateDefaultStorageClass(c, scName, "false") + updateDefaultStorageClass(ctx, c, scName, "false") ginkgo.By("creating a claim with default storageclass and expecting it to timeout") claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{}) + claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, claim, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, c, claim.Name, ns) // The claim should timeout phase:Pending - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) framework.ExpectError(err) framework.Logf(err.Error()) - claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), claim.Name, metav1.GetOptions{}) + claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending) }) @@ -662,9 +662,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // Modifying the default storage class can be disruptive to other tests that depend on it ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") - e2epv.SkipIfNoDefaultStorageClass(c) + e2epv.SkipIfNoDefaultStorageClass(ctx, c) - scName, scErr := e2epv.GetDefaultStorageClassName(c) + scName, scErr := e2epv.GetDefaultStorageClassName(ctx, c) framework.ExpectNoError(scErr) test := testsuites.StorageClassTest{ @@ -674,26 +674,26 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } ginkgo.By("removing the is-default StorageClass annotation") - verifyDefaultStorageClass(c, scName, true) + verifyDefaultStorageClass(ctx, c, scName, true) ginkgo.DeferCleanup(updateDefaultStorageClass, c, scName, "true") - updateDefaultStorageClass(c, scName, "") + 
updateDefaultStorageClass(ctx, c, scName, "") ginkgo.By("creating a claim with default storageclass and expecting it to timeout") claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{}) + claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, claim, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns)) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, claim.Name, ns)) }() // The claim should timeout phase:Pending - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) framework.ExpectError(err) framework.Logf(err.Error()) - claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), claim.Name, metav1.GetOptions{}) + claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending) }) @@ -720,11 +720,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { StorageClassName: &test.Class.Name, VolumeMode: &test.VolumeMode, }, ns) - claim, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) + claim, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) - err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) + err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) } @@ -735,7 +735,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // key was not provisioned. If the event is not delivered, we check that the volume is not Bound for whole // ClaimProvisionTimeout in the very same loop. 
err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) { - events, err := c.CoreV1().Events(claim.Namespace).List(context.TODO(), metav1.ListOptions{}) + events, err := c.CoreV1().Events(claim.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("could not list PVC events in %s: %v", claim.Namespace, err) } @@ -745,7 +745,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } } - pvc, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) if err != nil { return true, err } @@ -765,14 +765,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) }) -func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) { - sc, err := c.StorageV1().StorageClasses().Get(context.TODO(), scName, metav1.GetOptions{}) +func verifyDefaultStorageClass(ctx context.Context, c clientset.Interface, scName string, expectedDefault bool) { + sc, err := c.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(storageutil.IsDefaultAnnotation(sc.ObjectMeta), expectedDefault) } -func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) { - sc, err := c.StorageV1().StorageClasses().Get(context.TODO(), scName, metav1.GetOptions{}) +func updateDefaultStorageClass(ctx context.Context, c clientset.Interface, scName string, defaultStr string) { + sc, err := c.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) framework.ExpectNoError(err) if defaultStr == "" { @@ -786,14 +786,14 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = defaultStr } - _, err = c.StorageV1().StorageClasses().Update(context.TODO(), sc, metav1.UpdateOptions{}) + _, err = c.StorageV1().StorageClasses().Update(ctx, sc, metav1.UpdateOptions{}) framework.ExpectNoError(err) expectedDefault := false if defaultStr == "true" { expectedDefault = true } - verifyDefaultStorageClass(c, scName, expectedDefault) + verifyDefaultStorageClass(ctx, c, scName, expectedDefault) } func getDefaultPluginName() string { @@ -872,13 +872,13 @@ func getStorageClass( // waitForProvisionedVolumesDelete is a polling wrapper to scan all PersistentVolumes for any associated to the test's // StorageClass. Returns either an error and nil values or the remaining PVs and their count. 
-func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) { +func waitForProvisionedVolumesDeleted(ctx context.Context, c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) { var remainingPVs []*v1.PersistentVolume err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) { remainingPVs = []*v1.PersistentVolume{} - allPVs, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) + allPVs, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) if err != nil { return true, err } @@ -900,27 +900,27 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]* } // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" -func deleteStorageClass(c clientset.Interface, className string) { - err := c.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{}) +func deleteStorageClass(ctx context.Context, c clientset.Interface, className string) { + err := c.StorageV1().StorageClasses().Delete(ctx, className, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } } // deleteProvisionedVolumes [gce||gke only] iteratively deletes persistent volumes and attached GCE PDs. -func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.PersistentVolume) { +func deleteProvisionedVolumesAndDisks(ctx context.Context, c clientset.Interface, pvs []*v1.PersistentVolume) { framework.Logf("Remaining PersistentVolumes:") for i, pv := range pvs { framework.Logf("\t%d) %s", i+1, pv.Name) } for _, pv := range pvs { - framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)) - framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name)) + framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)) + framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name)) } } -func getRandomClusterZone(c clientset.Interface) string { - zones, err := e2enode.GetClusterZones(c) +func getRandomClusterZone(ctx context.Context, c clientset.Interface) string { + zones, err := e2enode.GetClusterZones(ctx, c) zone := "" framework.ExpectNoError(err) if len(zones) != 0 { diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 9e3770bf866..40170fe9f21 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -66,11 +66,11 @@ var _ = utils.SIGDescribe("Volumes", func() { "third": "this is the third file", }, } - if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configmap: %v", err) } defer func() { - _ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}) + _ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(ctx, configMap.Name, metav1.DeleteOptions{}) }() // Test one ConfigMap mounted several times to test #28502 @@ -110,7 +110,7 @@ var _ = utils.SIGDescribe("Volumes", func() { ExpectedContent: "this is the second file", }, } - e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests) }) }) }) diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go 
b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go index 9bbc1ca1196..468da6b55d6 100644 --- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go +++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go @@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() 4. Create a POD using the PVC. 5. Verify Disk and Attached to the node. */ - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) c = f.ClientSet @@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() clientPod = nil pvc = nil pv = nil - nodeInfo = GetReadySchedulableRandomNodeInfo() + nodeInfo = GetReadySchedulableRandomNodeInfo(ctx) volLabel = labels.Set{e2epv.VolumeSelectorKey: ns} selector = metav1.SetAsLabelSelector(volLabel) @@ -97,29 +97,29 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() StorageClassName: &emptyStorageClass, } ginkgo.By("Creating the PV and PVC") - pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) + pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) ginkgo.DeferCleanup(func() { - framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "AfterEach: failed to delete PV ", pv.Name) }) ginkgo.DeferCleanup(func() { - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name) }) - framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc)) + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc)) ginkgo.By("Creating the Client Pod") - clientPod, err = e2epod.CreateClientPod(c, ns, pvc) + clientPod, err = e2epod.CreateClientPod(ctx, c, ns, pvc) framework.ExpectNoError(err) node = clientPod.Spec.NodeName ginkgo.DeferCleanup(func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name) }) ginkgo.DeferCleanup(func() { - framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node), "wait for vsphere disk to detach") + framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volumePath, node), "wait for vsphere disk to detach") }) ginkgo.By("Verify disk should be attached to the node") - isAttached, err := diskIsAttached(volumePath, node) + isAttached, err := diskIsAttached(ctx, volumePath, node) framework.ExpectNoError(err) if !isAttached { framework.Failf("Disk %s is not attached with the node", volumePath) @@ -128,11 +128,11 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func(ctx context.Context) { ginkgo.By("Deleting the Claim") - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil ginkgo.By("Deleting the Pod") - 
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "Failed to delete pod ", clientPod.Name) }) /* @@ -144,11 +144,11 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() */ ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on vsphere volume detach", func(ctx context.Context) { ginkgo.By("Deleting the Persistent Volume") - framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name) pv = nil ginkgo.By("Deleting the pod") - framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "Failed to delete pod ", clientPod.Name) }) /* This test verifies that a volume mounted to a pod remains mounted after a kubelet restarts. @@ -159,7 +159,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() */ ginkgo.It("should test that a file written to the vsphere volume mount before kubelet restart can be read after restart [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() - utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, e2epod.VolumeMountPath1) + utils.TestKubeletRestartsAndRestoresMount(ctx, c, f, clientPod, e2epod.VolumeMountPath1) }) /* @@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() */ ginkgo.It("should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() - utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, e2epod.VolumeMountPath1) + utils.TestVolumeUnmountsFromDeletedPod(ctx, c, f, clientPod, e2epod.VolumeMountPath1) }) /* @@ -188,13 +188,14 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() */ ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func(ctx context.Context) { ginkgo.By("Deleting the Namespace") - err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}) + err := c.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{}) framework.ExpectNoError(err) - err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute) + err = framework.WaitForNamespacesDeleted(ctx, c, []string{ns}, 3*time.Minute) framework.ExpectNoError(err) ginkgo.By("Verifying Persistent Disk detaches") - waitForVSphereDiskToDetach(volumePath, node) + err = waitForVSphereDiskToDetach(ctx, volumePath, node) + framework.ExpectNoError(err) }) }) diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index 1d4062a57ba..9255f7c0cc8 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -47,18 +47,18 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo nodeInfo *NodeInfo ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + 
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) }) ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") ginkgo.DeferCleanup(testCleanupVSpherePersistentVolumeReclaim, c, nodeInfo, ns, volumePath, pv, pvc) Bootstrap(f) - nodeInfo = GetReadySchedulableRandomNodeInfo() + nodeInfo = GetReadySchedulableRandomNodeInfo(ctx) pv = nil pvc = nil volumePath = "" @@ -78,14 +78,14 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo */ ginkgo.It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func(ctx context.Context) { var err error - volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) + volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(ctx, c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) framework.ExpectNoError(err) - deletePVCAfterBind(c, ns, pvc, pv) + deletePVCAfterBind(ctx, c, ns, pvc, pv) pvc = nil ginkgo.By("verify pv is deleted") - err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) pv = nil @@ -107,45 +107,45 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo ginkgo.It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func(ctx context.Context) { var err error - volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) + volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(ctx, c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) framework.ExpectNoError(err) // Wait for PV and PVC to Bind - framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc)) + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc)) ginkgo.By("Creating the Pod") - pod, err := e2epod.CreateClientPod(c, ns, pvc) + pod, err := e2epod.CreateClientPod(ctx, c, ns, pvc) framework.ExpectNoError(err) ginkgo.By("Deleting the Claim") - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil // Verify PV is Present, after PVC is deleted and PV status should be Failed. 
- pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - err = e2epv.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second) + err = e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second) framework.ExpectNoError(err) ginkgo.By("Verify the volume is attached to the node") - isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) + isVolumeAttached, verifyDiskAttachedError := diskIsAttached(ctx, pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) framework.ExpectNoError(verifyDiskAttachedError) if !isVolumeAttached { framework.Failf("Disk %s is not attached with the node %s", pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) } ginkgo.By("Verify the volume is accessible and available in the pod") - verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv}) + verifyVSphereVolumesAccessible(ctx, c, pod, []*v1.PersistentVolume{pv}) framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim") ginkgo.By("Deleting the Pod") - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod), "Failed to delete pod ", pod.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod), "Failed to delete pod ", pod.Name) ginkgo.By("Verify PV is detached from the node after Pod is deleted") - err = waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) + err = waitForVSphereDiskToDetach(ctx, pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) framework.ExpectNoError(err) ginkgo.By("Verify PV should be deleted automatically") - framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) + framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 1*time.Second, 30*time.Second)) pv = nil volumePath = "" }) @@ -172,41 +172,41 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo var err error var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10) - volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain) + volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(ctx, c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain) framework.ExpectNoError(err) - writeContentToVSpherePV(c, f.Timeouts, pvc, volumeFileContent) + writeContentToVSpherePV(ctx, c, f.Timeouts, pvc, volumeFileContent) ginkgo.By("Delete PVC") - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil ginkgo.By("Verify PV is retained") framework.Logf("Waiting for PV %v to become Released", pv.Name) - err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second) + err = e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) - framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name) ginkgo.By("Creating the PV for same volume path") pv 
= getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil) - pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("wait for the pv and pvc to bind") - framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc)) - verifyContentOfVSpherePV(c, f.Timeouts, pvc, volumeFileContent) + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc)) + verifyContentOfVSpherePV(ctx, c, f.Timeouts, pvc, volumeFileContent) }) }) }) // Test Setup for persistentvolumereclaim tests for vSphere Provider -func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) { +func testSetupVSpherePersistentVolumeReclaim(ctx context.Context, c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) { ginkgo.By("running testSetupVSpherePersistentVolumeReclaim") ginkgo.By("creating vmdk") volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) @@ -215,41 +215,41 @@ func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *No } ginkgo.By("creating the pv") pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil) - pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) if err != nil { return } ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{}) return } // Test Cleanup for persistentvolumereclaim tests for vSphere Provider -func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { +func testCleanupVSpherePersistentVolumeReclaim(ctx context.Context, c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { ginkgo.By("running testCleanupVSpherePersistentVolumeReclaim") if len(volumePath) > 0 { err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) framework.ExpectNoError(err) } if pv != nil { - framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name) } if pvc != nil { - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) } } // func to wait 
until PV and PVC bind and once bind completes, delete the PVC -func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { +func deletePVCAfterBind(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { var err error ginkgo.By("wait for the pv and pvc to bind") - framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc)) + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc)) ginkgo.By("delete pvc") - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) - _, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) + _, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index e0b720c1f00..fc06c9f9dd6 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -63,13 +63,13 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele err error nodeInfo *NodeInfo ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") c = f.ClientSet ns = f.Namespace.Name Bootstrap(f) - nodeInfo = GetReadySchedulableRandomNodeInfo() - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + nodeInfo = GetReadySchedulableRandomNodeInfo(ctx) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) ssdlabels = make(map[string]string) ssdlabels["volume-type"] = "ssd" vvollabels = make(map[string]string) @@ -78,38 +78,38 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele }) ginkgo.Describe("Selector-Label Volume Binding:vsphere [Feature:vsphere]", func() { - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Running clean up actions") if framework.ProviderIs("vsphere") { - testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pvSsd, pvcSsd, pvcVvol) + testCleanupVSpherePVClabelselector(ctx, c, ns, nodeInfo, volumePath, pvSsd, pvcSsd, pvcVvol) } }) ginkgo.It("should bind volume with claim for given label", func(ctx context.Context) { - volumePath, pvSsd, pvcSsd, pvcVvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels) + volumePath, pvSsd, pvcSsd, pvcVvol, err = testSetupVSpherePVClabelselector(ctx, c, nodeInfo, ns, ssdlabels, vvollabels) framework.ExpectNoError(err) ginkgo.By("wait for the pvcSsd to bind with pvSsd") - framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pvSsd, pvcSsd)) + framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pvSsd, pvcSsd)) ginkgo.By("Verify status of pvcVvol is pending") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) ginkgo.By("delete pvcSsd") - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, 
pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name) ginkgo.By("verify pvSsd is deleted") - err = e2epv.WaitForPersistentVolumeDeleted(c, pvSsd.Name, 3*time.Second, 300*time.Second) + err = e2epv.WaitForPersistentVolumeDeleted(ctx, c, pvSsd.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) volumePath = "" ginkgo.By("delete pvcVvol") - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name) }) }) }) -func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim, err error) { +func testSetupVSpherePVClabelselector(ctx context.Context, c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim, err error) { ginkgo.By("creating vmdk") volumePath = "" volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) @@ -119,36 +119,36 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ginkgo.By("creating the pv with label volume-type:ssd") pvSsd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels) - pvSsd, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pvSsd, metav1.CreateOptions{}) + pvSsd, err = c.CoreV1().PersistentVolumes().Create(ctx, pvSsd, metav1.CreateOptions{}) if err != nil { return } ginkgo.By("creating pvc with label selector to match with volume-type:vvol") pvcVvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels) - pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvcVvol, metav1.CreateOptions{}) + pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvcVvol, metav1.CreateOptions{}) if err != nil { return } ginkgo.By("creating pvc with label selector to match with volume-type:ssd") pvcSsd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels) - pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvcSsd, metav1.CreateOptions{}) + pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvcSsd, metav1.CreateOptions{}) return } -func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim) { +func testCleanupVSpherePVClabelselector(ctx context.Context, c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim) { ginkgo.By("running testCleanupVSpherePVClabelselector") if len(volumePath) > 0 { nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) } if pvcSsd != nil { - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name) } if pvcVvol != nil { - 
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name) } if pvSsd != nil { - framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pvSsd.Name), "Failed to delete PV ", pvSsd.Name) + framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pvSsd.Name), "Failed to delete PV ", pvSsd.Name) } } diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index 0ef963e1f41..5ab29b8c2a4 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -74,7 +74,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4} ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) var err error - nodes, err = e2enode.GetReadySchedulableNodes(client) + nodes, err = e2enode.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err) if len(nodes.Items) < 2 { e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items)) @@ -135,7 +135,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { case storageclass4: scParams[Datastore] = datastoreName } - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{}) gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty") framework.ExpectNoError(err, "Failed to create storage class") ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, scname, metav1.DeleteOptions{}) @@ -148,7 +148,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { volumeCountPerInstance = volumeCount } volumeCount = volumeCount - volumeCountPerInstance - go VolumeCreateAndAttach(client, f.Timeouts, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan) + go VolumeCreateAndAttach(ctx, client, f.Timeouts, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan) } // Get the list of all volumes attached to each node from the go routines by reading the data from the channel @@ -157,20 +157,20 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...) } } - podList, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) + podList, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list pods") for _, pod := range podList.Items { pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...) 
ginkgo.By("Deleting pod") - err = e2epod.DeletePodWithWait(client, &pod) + err = e2epod.DeletePodWithWait(ctx, client, &pod) framework.ExpectNoError(err) } ginkgo.By("Waiting for volumes to be detached from the node") - err = waitForVSphereDisksToDetach(nodeVolumeMap) + err = waitForVSphereDisksToDetach(ctx, nodeVolumeMap) framework.ExpectNoError(err) for _, pvcClaim := range pvcClaimList { - err = e2epv.DeletePersistentVolumeClaim(client, pvcClaim, namespace) + err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvcClaim, namespace) framework.ExpectNoError(err) } }) @@ -188,7 +188,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string { } // VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale -func VolumeCreateAndAttach(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) { +func VolumeCreateAndAttach(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) { defer ginkgo.GinkgoRecover() nodeVolumeMap := make(map[string][]string) nodeSelectorIndex := 0 @@ -199,26 +199,26 @@ func VolumeCreateAndAttach(client clientset.Interface, timeouts *framework.Timeo pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod) for i := 0; i < volumesPerPod; i++ { ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)])) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)])) framework.ExpectNoError(err) pvclaims[i] = pvclaim } ginkgo.By("Waiting for claim to be in bound phase") - persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision) + persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision) framework.ExpectNoError(err) ginkgo.By("Creating pod to attach PV to the node") nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)] // Create pod to attach Volume to Node - pod, err := e2epod.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") + pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") framework.ExpectNoError(err) for _, pv := range persistentvolumes { nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath) } ginkgo.By("Verify the volume is accessible and available in the pod") - verifyVSphereVolumesAccessible(client, pod, persistentvolumes) + verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) nodeSelectorIndex++ } nodeVolumeMapChan <- nodeVolumeMap diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index 2317b871d0a..f87f220c930 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -74,84 +74,84 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() { scParameters := make(map[string]string) 
scParameters["diskformat"] = "thin" scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "") - sc, err := client.StorageV1().StorageClasses().Create(context.TODO(), scSpec, metav1.CreateOptions{}) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{}) ginkgo.By("Creating statefulset") - statefulset := e2estatefulset.CreateStatefulSet(client, manifestPath, namespace) + statefulset := e2estatefulset.CreateStatefulSet(ctx, client, manifestPath, namespace) ginkgo.DeferCleanup(e2estatefulset.DeleteAllStatefulSets, client, namespace) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready - e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas) - framework.ExpectNoError(e2estatefulset.CheckMount(client, statefulset, mountPath)) - ssPodsBeforeScaleDown := e2estatefulset.GetPodList(client, statefulset) + e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) + framework.ExpectNoError(e2estatefulset.CheckMount(ctx, client, statefulset, mountPath)) + ssPodsBeforeScaleDown := e2estatefulset.GetPodList(ctx, client, statefulset) gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) framework.ExpectEqual(len(ssPodsBeforeScaleDown.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas") // Get the list of Volumes attached to Pods before scale down volumesBeforeScaleDown := make(map[string]string) for _, sspod := range ssPodsBeforeScaleDown.Items { - _, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{}) + _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { - volumePath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + volumePath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) volumesBeforeScaleDown[volumePath] = volumespec.PersistentVolumeClaim.ClaimName } } } ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) - _, scaledownErr := e2estatefulset.Scale(client, statefulset, replicas-1) + _, scaledownErr := e2estatefulset.Scale(ctx, client, statefulset, replicas-1) framework.ExpectNoError(scaledownErr) - e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas-1) + e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas-1) // After scale down, verify vsphere volumes are detached from deleted pods ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") for _, sspod := range ssPodsBeforeScaleDown.Items { - _, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{}) + _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) if err != nil { if !apierrors.IsNotFound(err) { framework.Failf("Error in getting Pod %s: %v", sspod.Name, err) } for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { - vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, 
volumespec.PersistentVolumeClaim.ClaimName) + vSpherediskPath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName) - framework.ExpectNoError(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName)) + framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, vSpherediskPath, sspod.Spec.NodeName)) } } } } ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) - _, scaleupErr := e2estatefulset.Scale(client, statefulset, replicas) + _, scaleupErr := e2estatefulset.Scale(ctx, client, statefulset, replicas) framework.ExpectNoError(scaleupErr) - e2estatefulset.WaitForStatusReplicas(client, statefulset, replicas) - e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas) + e2estatefulset.WaitForStatusReplicas(ctx, client, statefulset, replicas) + e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) - ssPodsAfterScaleUp := e2estatefulset.GetPodList(client, statefulset) + ssPodsAfterScaleUp := e2estatefulset.GetPodList(ctx, client, statefulset) gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) framework.ExpectEqual(len(ssPodsAfterScaleUp.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas") // After scale up, verify all vsphere volumes are attached to node VMs. ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") for _, sspod := range ssPodsAfterScaleUp.Items { - err := e2epod.WaitTimeoutForPodReadyInNamespace(client, sspod.Name, statefulset.Namespace, framework.PodStartTimeout) + err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, client, sspod.Name, statefulset.Namespace, framework.PodStartTimeout) framework.ExpectNoError(err) - pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{}) + pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for _, volumespec := range pod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { - vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + vSpherediskPath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName) // Verify scale up has re-attached the same volumes and not introduced new volume if volumesBeforeScaleDown[vSpherediskPath] == "" { framework.Failf("Volume: %q was not attached to the Node: %q before scale down", vSpherediskPath, sspod.Spec.NodeName) } - isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName) + isVolumeAttached, verifyDiskAttachedError := diskIsAttached(ctx, vSpherediskPath, sspod.Spec.NodeName) if !isVolumeAttached { framework.Failf("Volume: %q is not attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName) } diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index 02dfbec7931..323bb37515f 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -59,13 +59,13 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun 
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4} ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) // if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS is 10. 12 threads will run in parallel for 10 times. @@ -97,23 +97,23 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun var err error switch scname { case storageclass1: - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{}) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[PolicyHostFailuresToTolerate] = "1" - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{}) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{}) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "") - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec, metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, scWithDatastoreSpec, metav1.CreateOptions{}) } gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) @@ -125,7 +125,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun wg.Add(instances) for instanceCount := 0; instanceCount < instances; instanceCount++ { instanceID := fmt.Sprintf("Thread:%v", instanceCount+1) - go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceID, scArrays[instanceCount%len(scArrays)], iterations, &wg) + go PerformVolumeLifeCycleInParallel(ctx, f, client, namespace, instanceID, scArrays[instanceCount%len(scArrays)], iterations, &wg) } wg.Wait() }) @@ -134,56 +134,56 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun // PerformVolumeLifeCycleInParallel performs volume lifecycle operations // Called as a go routine to perform operations in parallel -func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceID string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) { +func PerformVolumeLifeCycleInParallel(ctx context.Context, f *framework.Framework, client 
clientset.Interface, namespace string, instanceID string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) { defer wg.Done() defer ginkgo.GinkgoRecover() for iterationCount := 0; iterationCount < iterations; iterationCount++ { logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceID, iterationCount+1) ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) - persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision) + persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) // Create pod to attach Volume to Node - pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "") + pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) - err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow) framework.ExpectNoError(err) // Get the copy of the Pod to know the assigned node name. 
- pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) - isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) + isVolumeAttached, verifyDiskAttachedError := diskIsAttached(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) if !isVolumeAttached { framework.Failf("Volume: %s is not attached to the node: %v", persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) } framework.ExpectNoError(verifyDiskAttachedError) ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name)) - verifyVSphereVolumesAccessible(client, pod, persistentvolumes) + verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) - err = e2epod.DeletePodWithWait(client, pod) + err = e2epod.DeletePodWithWait(ctx, client, pod) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) - err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) + err = waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name)) - err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) framework.ExpectNoError(err) } } diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 3e2652290a5..0e01a347502 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -69,13 +69,13 @@ const ( ) // Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes -func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error { +func waitForVSphereDisksToDetach(ctx context.Context, nodeVolumes map[string][]string) error { var ( detachTimeout = 5 * time.Minute detachPollTime = 10 * time.Second ) - waitErr := wait.Poll(detachPollTime, detachTimeout, func() (bool, error) { - attachedResult, err := disksAreAttached(nodeVolumes) + waitErr := wait.PollWithContext(ctx, detachPollTime, detachTimeout, func(ctx context.Context) (bool, error) { + attachedResult, err := disksAreAttached(ctx, nodeVolumes) if err != nil { return false, err } @@ -100,7 +100,7 @@ func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error { } // Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes -func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState volumeState) error { +func waitForVSphereDiskStatus(ctx context.Context, volumePath string, nodeName string, expectedState volumeState) error { var ( currentState volumeState timeout = 6 * time.Minute @@ -117,8 +117,8 @@ func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState 
volumeStateDetached: "detached from", } - waitErr := wait.Poll(pollTime, timeout, func() (bool, error) { - diskAttached, err := diskIsAttached(volumePath, nodeName) + waitErr := wait.PollWithContext(ctx, pollTime, timeout, func(ctx context.Context) (bool, error) { + diskAttached, err := diskIsAttached(ctx, volumePath, nodeName) if err != nil { return true, err } @@ -141,13 +141,13 @@ func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState } // Wait until vsphere vmdk is attached from the given node or time out after 6 minutes -func waitForVSphereDiskToAttach(volumePath string, nodeName string) error { - return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateAttached) +func waitForVSphereDiskToAttach(ctx context.Context, volumePath string, nodeName string) error { + return waitForVSphereDiskStatus(ctx, volumePath, nodeName, volumeStateAttached) } // Wait until vsphere vmdk is detached from the given node or time out after 6 minutes -func waitForVSphereDiskToDetach(volumePath string, nodeName string) error { - return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateDetached) +func waitForVSphereDiskToDetach(ctx context.Context, volumePath string, nodeName string) error { + return waitForVSphereDiskStatus(ctx, volumePath, nodeName, volumeStateDetached) } // function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels @@ -198,14 +198,14 @@ func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]str } // function to write content to the volume backed by given PVC -func writeContentToVSpherePV(client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) { - utils.RunInPodWithVolume(client, timeouts, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data") +func writeContentToVSpherePV(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) { + utils.RunInPodWithVolume(ctx, client, timeouts, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data") framework.Logf("Done with writing content to volume") } // function to verify content is matching on the volume backed for given PVC -func verifyContentOfVSpherePV(client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) { - utils.RunInPodWithVolume(client, timeouts, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data") +func verifyContentOfVSpherePV(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) { + utils.RunInPodWithVolume(ctx, client, timeouts, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data") framework.Logf("Successfully verified content of the volume") } @@ -373,12 +373,12 @@ func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths } // verify volumes are attached to the node and are accessible in pod -func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) { +func verifyVSphereVolumesAccessible(ctx context.Context, c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) { nodeName := pod.Spec.NodeName namespace := pod.Namespace for index, pv := range persistentvolumes { // Verify disks are attached to the node - isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) + 
isAttached, err := diskIsAttached(ctx, pv.Spec.VsphereVolume.VolumePath, nodeName) framework.ExpectNoError(err) if !isAttached { framework.Failf("disk %v is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName) @@ -391,7 +391,7 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste } // verify volumes are created on one of the specified zones -func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, nodeName string, zones []string) { +func verifyVolumeCreationOnRightZone(ctx context.Context, persistentvolumes []*v1.PersistentVolume, nodeName string, zones []string) { for _, pv := range persistentvolumes { volumePath := pv.Spec.VsphereVolume.VolumePath // Extract datastoreName from the volume path in the pv spec @@ -399,7 +399,7 @@ func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, n datastorePathObj, _ := getDatastorePathObjFromVMDiskPath(volumePath) datastoreName := datastorePathObj.Datastore nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Get the datastore object reference from the datastore name datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName) @@ -424,10 +424,10 @@ func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, n } // Get vSphere Volume Path from PVC -func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string { - pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), claimName, metav1.GetOptions{}) +func getvSphereVolumePathFromClaim(ctx context.Context, client clientset.Interface, namespace string, claimName string) string { + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, claimName, metav1.GetOptions{}) framework.ExpectNoError(err) - pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvclaim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) return pv.Spec.VsphereVolume.VolumePath } @@ -601,8 +601,8 @@ func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volP } // get .vmx file path for a virtual machine -func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) { - ctx, cancel := context.WithCancel(context.Background()) +func getVMXFilePath(ctx context.Context, vmObject *object.VirtualMachine) (vmxPath string) { + ctx, cancel := context.WithCancel(ctx) defer cancel() var nodeVM mo.VirtualMachine @@ -616,10 +616,10 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) { } // verify ready node count. Try up to 3 minutes. 
Return true if count is expected count -func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool { +func verifyReadyNodeCount(ctx context.Context, client clientset.Interface, expectedNodes int) bool { numNodes := 0 for i := 0; i < 36; i++ { - nodeList, err := e2enode.GetReadySchedulableNodes(client) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err) numNodes = len(nodeList.Items) @@ -632,8 +632,8 @@ func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool { } // poweroff nodeVM and confirm the poweroff state -func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) { - ctx, cancel := context.WithCancel(context.Background()) +func poweroffNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) { + ctx, cancel := context.WithCancel(ctx) defer cancel() framework.Logf("Powering off node VM %s", nodeName) @@ -645,8 +645,8 @@ func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) { } // poweron nodeVM and confirm the poweron state -func poweronNodeVM(nodeName string, vm *object.VirtualMachine) { - ctx, cancel := context.WithCancel(context.Background()) +func poweronNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) { + ctx, cancel := context.WithCancel(ctx) defer cancel() framework.Logf("Powering on node VM %s", nodeName) @@ -657,11 +657,11 @@ func poweronNodeVM(nodeName string, vm *object.VirtualMachine) { } // unregister a nodeVM from VC -func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) { - ctx, cancel := context.WithCancel(context.Background()) +func unregisterNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) { + ctx, cancel := context.WithCancel(ctx) defer cancel() - poweroffNodeVM(nodeName, vm) + poweroffNodeVM(ctx, nodeName, vm) framework.Logf("Unregistering node VM %s", nodeName) err := vm.Unregister(ctx) @@ -669,8 +669,8 @@ func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) { } // register a nodeVM into a VC -func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) { - ctx, cancel := context.WithCancel(context.Background()) +func registerNodeVM(ctx context.Context, nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) { + ctx, cancel := context.WithCancel(ctx) defer cancel() framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath) @@ -690,12 +690,12 @@ func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.Reso vm, err := finder.VirtualMachine(ctx, vmPath) framework.ExpectNoError(err) - poweronNodeVM(nodeName, vm) + poweronNodeVM(ctx, nodeName, vm) } // disksAreAttached takes map of node and it's volumes and returns map of node, its volumes and attachment state -func disksAreAttached(nodeVolumes map[string][]string) (map[string]map[string]bool, error) { - ctx, cancel := context.WithCancel(context.Background()) +func disksAreAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) { + ctx, cancel := context.WithCancel(ctx) defer cancel() disksAttached := make(map[string]map[string]bool) @@ -711,7 +711,7 @@ func disksAreAttached(nodeVolumes map[string][]string) (map[string]map[string]bo for vm, volumes := range vmVolumes { volumeAttachedMap := make(map[string]bool) for _, volume := range volumes { - attached, err := diskIsAttached(volume, vm) + attached, err := diskIsAttached(ctx, volume, vm) if err != nil { 
return nil, err } @@ -723,9 +723,9 @@ func disksAreAttached(nodeVolumes map[string][]string) (map[string]map[string]bo } // diskIsAttached returns if disk is attached to the VM using controllers supported by the plugin. -func diskIsAttached(volPath string, nodeName string) (bool, error) { +func diskIsAttached(ctx context.Context, volPath string, nodeName string) (bool, error) { // Create context - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) Connect(ctx, nodeInfo.VSphere) @@ -752,8 +752,8 @@ func getUUIDFromProviderID(providerID string) string { } // GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state -func GetReadySchedulableNodeInfos() []*NodeInfo { - nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) +func GetReadySchedulableNodeInfos(ctx context.Context) []*NodeInfo { + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) var nodesInfo []*NodeInfo for _, node := range nodeList.Items { @@ -768,18 +768,18 @@ func GetReadySchedulableNodeInfos() []*NodeInfo { // GetReadySchedulableRandomNodeInfo returns NodeInfo object for one of the Ready and Schedulable Node. // if multiple nodes are present with Ready and Schedulable state then one of the Node is selected randomly // and it's associated NodeInfo object is returned. -func GetReadySchedulableRandomNodeInfo() *NodeInfo { - nodesInfo := GetReadySchedulableNodeInfos() +func GetReadySchedulableRandomNodeInfo(ctx context.Context) *NodeInfo { + nodesInfo := GetReadySchedulableNodeInfos(ctx) gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty()) return nodesInfo[rand.Int()%len(nodesInfo)] } // invokeVCenterServiceControl invokes the given command for the given service // via service-control on the given vCenter host over SSH. -func invokeVCenterServiceControl(command, service, host string) error { +func invokeVCenterServiceControl(ctx context.Context, command, service, host string) error { sshCmd := fmt.Sprintf("service-control --%s %s", command, service) framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host) - result, err := e2essh.SSH(sshCmd, host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, sshCmd, host, framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err) @@ -789,8 +789,8 @@ func invokeVCenterServiceControl(command, service, host string) error { // expectVolumeToBeAttached checks if the given Volume is attached to the given // Node, else fails. -func expectVolumeToBeAttached(nodeName, volumePath string) { - isAttached, err := diskIsAttached(volumePath, nodeName) +func expectVolumeToBeAttached(ctx context.Context, nodeName, volumePath string) { + isAttached, err := diskIsAttached(ctx, volumePath, nodeName) framework.ExpectNoError(err) if !isAttached { framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName) @@ -799,12 +799,12 @@ func expectVolumeToBeAttached(nodeName, volumePath string) { // expectVolumesToBeAttached checks if the given Volumes are attached to the // corresponding set of Nodes, else fails. 
-func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) { +func expectVolumesToBeAttached(ctx context.Context, pods []*v1.Pod, volumePaths []string) { for i, pod := range pods { nodeName := pod.Spec.NodeName volumePath := volumePaths[i] ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) - expectVolumeToBeAttached(nodeName, volumePath) + expectVolumeToBeAttached(ctx, nodeName, volumePath) } } diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index 97d695657a7..ff965b1cede 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -52,12 +52,12 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v nodeInfo *NodeInfo ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - nodeInfo = GetReadySchedulableRandomNodeInfo() + nodeInfo = GetReadySchedulableRandomNodeInfo(ctx) scParameters = make(map[string]string) clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore) }) @@ -92,25 +92,25 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil) ginkgo.By("Creating pod") - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) + pod, err := client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be ready") - gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) // get fresh pod info - pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName ginkgo.By("Verifying volume is attached") - expectVolumeToBeAttached(nodeName, volumePath) + expectVolumeToBeAttached(ctx, nodeName, volumePath) ginkgo.By("Deleting pod") - err = e2epod.DeletePodWithWait(client, pod) + err = e2epod.DeletePodWithWait(ctx, client, pod) framework.ExpectNoError(err) ginkgo.By("Waiting for volumes to be detached from the node") - err = waitForVSphereDiskToDetach(volumePath, nodeName) + err = waitForVSphereDiskToDetach(ctx, volumePath, nodeName) framework.ExpectNoError(err) }) @@ -121,7 +121,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v */ ginkgo.It("verify dynamic provision with default parameter on clustered datastore", func(ctx context.Context) { scParameters[Datastore] = clusterDatastore - invokeValidPolicyTest(f, client, namespace, scParameters) + invokeValidPolicyTest(ctx, f, client, namespace, scParameters) }) /* @@ -132,6 +132,6 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v ginkgo.It("verify dynamic provision with spbm policy on clustered datastore", func(ctx context.Context) { policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster) scParameters[SpbmStoragePolicy] = policyDatastoreCluster - invokeValidPolicyTest(f, client, namespace, scParameters) + invokeValidPolicyTest(ctx, f, client, namespace, 
scParameters) }) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index a55d94560d4..fda2d86db53 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -59,13 +59,13 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", scParameters map[string]string vSphereCSIMigrationEnabled bool ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name scParameters = make(map[string]string) - _, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + _, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) vSphereCSIMigrationEnabled = GetAndExpectBoolEnvVar(VSphereCSIMigrationEnabled) }) @@ -74,7 +74,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", ginkgo.By("Invoking Test for invalid datastore") scParameters[Datastore] = invalidDatastore scParameters[DiskFormat] = ThinDisk - err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters) + err := invokeInvalidDatastoreTestNeg(ctx, client, namespace, scParameters) framework.ExpectError(err) var errorMsg string if !vSphereCSIMigrationEnabled { @@ -88,22 +88,22 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", }) }) -func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { +func invokeInvalidDatastoreTestNeg(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string) error { ginkgo.By("Creating Storage Class With Invalid Datastore") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""), metav1.CreateOptions{}) + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) ginkgo.By("Expect claim to fail provisioning volume") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(context.TODO(), metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) var eventErrorMessages string diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 
da02194ab28..234afacb974 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -69,12 +69,12 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { nodeKeyValueLabel map[string]string nodeLabelValue string ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - nodeName = GetReadySchedulableRandomNodeInfo().Name + nodeName = GetReadySchedulableRandomNodeInfo(ctx).Name nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID()) nodeKeyValueLabel = map[string]string{NodeLabelKey: nodeLabelValue} e2enode.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue) @@ -83,19 +83,19 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { ginkgo.By("Invoking Test for diskformat: eagerzeroedthick") - invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick") + invokeTest(ctx, f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick") }) ginkgo.It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { ginkgo.By("Invoking Test for diskformat: zeroedthick") - invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick") + invokeTest(ctx, f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick") }) ginkgo.It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { ginkgo.By("Invoking Test for diskformat: thin") - invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin") + invokeTest(ctx, f, client, namespace, nodeName, nodeKeyValueLabel, "thin") }) }) -func invokeTest(f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) { +func invokeTest(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) { framework.Logf("Invoking Test for DiskFomat: %s", diskFormat) scParameters := make(map[string]string) @@ -103,28 +103,28 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st ginkgo.By("Creating Storage Class With DiskFormat") storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil, "") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, storageClassSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) - pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvclaimSpec, metav1.CreateOptions{}) + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvclaimSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) 
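The claim-provisioning waits in these hunks, like the attach/detach helpers earlier in this patch, rely on context-aware polling. A minimal sketch of the wait.Poll to wait.PollWithContext conversion, with a hypothetical check callback standing in for the vSphere disk query:

```go
package vspheresketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCondition sketches the wait.Poll -> wait.PollWithContext conversion:
// the condition callback (hypothetical here) receives the same ctx, and the
// poll loop stops as soon as ctx is cancelled rather than running to timeout.
func waitForCondition(ctx context.Context, check func(context.Context) (bool, error)) error {
	return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute,
		func(ctx context.Context) (bool, error) {
			return check(ctx)
		})
}
```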
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.CoreV1().PersistentVolumeClaims(namespace).Delete), pvclaimSpec.Name, metav1.DeleteOptions{}) ginkgo.By("Waiting for claim to be in bound phase") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision) framework.ExpectNoError(err) // Get new copy of the claim - pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{}) + pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Get the bound PV - pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvclaim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) /* @@ -134,37 +134,37 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done") - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podSpec, metav1.CreateOptions{}) + pod, err := client.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be running") - gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) - isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) + isAttached, err := diskIsAttached(ctx, pv.Spec.VsphereVolume.VolumePath, nodeName) if !isAttached { framework.Failf("Volume: %s is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName) } framework.ExpectNoError(err) ginkgo.By("Verify Disk Format") - framework.ExpectEqual(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat), true, "DiskFormat Verification Failed") + framework.ExpectEqual(verifyDiskFormat(ctx, client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat), true, "DiskFormat Verification Failed") var volumePaths []string volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath) ginkgo.By("Delete pod and wait for volume to be detached from node") - deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths) + deletePodAndWaitForVolumeToDetach(ctx, f, client, pod, nodeName, volumePaths) } -func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool { +func verifyDiskFormat(ctx context.Context, client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool { ginkgo.By("Verifying disk format") eagerlyScrub := false thinProvisioned := false diskFound := false pvvmdkfileName := filepath.Base(pvVolumePath) + filepath.Ext(pvVolumePath) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go 
b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index 76cdb2b4393..a99b93406cf 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -71,25 +71,25 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { expectedDiskSize := "1Mi" ginkgo.By("Creating Storage Class") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""), metav1.CreateOptions{}) + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) ginkgo.By("Waiting for claim to be in bound phase") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectNoError(err) ginkgo.By("Getting new copy of PVC") - pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{}) + pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Getting PV created") - pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvclaim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Verifying if provisioned PV has the correct size") diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 008acc7380a..aac7010f98e 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -75,71 +75,71 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() { client clientset.Interface namespace string ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty()) + gomega.Expect(GetReadySchedulableNodeInfos(ctx)).NotTo(gomega.BeEmpty()) }) ginkgo.It("verify fstype - ext3 formatted volume", func(ctx context.Context) { ginkgo.By("Invoking Test for fstype: ext3") - invokeTestForFstype(f, client, namespace, ext3FSType, ext3FSType) + invokeTestForFstype(ctx, f, client, namespace, ext3FSType, ext3FSType) }) ginkgo.It("verify fstype - default value should be ext4", func(ctx context.Context) { ginkgo.By("Invoking Test for fstype: Default Value - ext4") - invokeTestForFstype(f, client, namespace, "", ext4FSType) + invokeTestForFstype(ctx, f, client, namespace, "", 
ext4FSType) }) ginkgo.It("verify invalid fstype", func(ctx context.Context) { ginkgo.By("Invoking Test for fstype: invalid Value") - invokeTestForInvalidFstype(f, client, namespace, invalidFSType) + invokeTestForInvalidFstype(ctx, f, client, namespace, invalidFSType) }) }) -func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) { +func invokeTestForFstype(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) { framework.Logf("Invoking Test for fstype: %s", fstype) scParameters := make(map[string]string) scParameters["fstype"] = fstype // Create Persistent Volume ginkgo.By("Creating Storage Class With Fstype") - pvclaim, persistentvolumes := createVolume(client, f.Timeouts, namespace, scParameters) + pvclaim, persistentvolumes := createVolume(ctx, client, f.Timeouts, namespace, scParameters) // Create Pod and verify the persistent volume is accessible - pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes) + pod := createPodAndVerifyVolumeAccessible(ctx, client, namespace, pvclaim, persistentvolumes) _, err := e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) framework.ExpectNoError(err) // Detach and delete volume - detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) - err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + detachVolume(ctx, f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) + err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) framework.ExpectNoError(err) } -func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) { +func invokeTestForInvalidFstype(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, fstype string) { scParameters := make(map[string]string) scParameters["fstype"] = fstype // Create Persistent Volume ginkgo.By("Creating Storage Class With Invalid Fstype") - pvclaim, persistentvolumes := createVolume(client, f.Timeouts, namespace, scParameters) + pvclaim, persistentvolumes := createVolume(ctx, client, f.Timeouts, namespace, scParameters) ginkgo.By("Creating pod to attach PV to the node") var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) // Create pod to attach Volume to Node - pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, execCommand) + pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, execCommand) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(namespace).List(context.TODO(), metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) // Detach and delete volume - detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) - err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + detachVolume(ctx, f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) + err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) framework.ExpectNoError(err) gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty()) @@ -155,45 +155,46 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa } } -func createVolume(client clientset.Interface, timeouts 
*framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{}) +func createVolume(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass), metav1.CreateOptions{}) + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass), metav1.CreateOptions{}) framework.ExpectNoError(err) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for claim to be in bound phase") - persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision) + persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision) framework.ExpectNoError(err) return pvclaim, persistentvolumes } -func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod { +func createPodAndVerifyVolumeAccessible(ctx context.Context, client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod { var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node - pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, execCommand) + pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, execCommand) framework.ExpectNoError(err) // Asserts: Right disk is attached to the pod ginkgo.By("Verify the volume is accessible and available in the pod") - verifyVSphereVolumesAccessible(client, pod, persistentvolumes) + verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) return pod } // detachVolume delete the volume passed in the argument and wait until volume is detached from the node, -func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) { - pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) +func detachVolume(ctx context.Context, f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) { + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName ginkgo.By("Deleting pod") - e2epod.DeletePodWithWait(client, pod) + err = e2epod.DeletePodWithWait(ctx, client, pod) + framework.ExpectNoError(err) ginkgo.By("Waiting for volumes to be detached from the node") - waitForVSphereDiskToDetach(volPath, nodeName) + 
framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volPath, nodeName)) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 01e2e78a9c6..5f7a806df5b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -41,10 +41,10 @@ import ( ) // waitForKubeletUp waits for the kubelet on the given host to be up. -func waitForKubeletUp(host string) error { +func waitForKubeletUp(ctx context.Context, host string) error { cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz" for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) } @@ -56,18 +56,18 @@ func waitForKubeletUp(host string) error { } // restartKubelet restarts kubelet on the given host. -func restartKubelet(host string) error { +func restartKubelet(ctx context.Context, host string) error { var cmd string var sudoPresent bool - sshResult, err := e2essh.SSH("sudo --version", host, framework.TestContext.Provider) + sshResult, err := e2essh.SSH(ctx, "sudo --version", host, framework.TestContext.Provider) if err != nil { return fmt.Errorf("Unable to ssh to host %s with error %v", host, err) } if !strings.Contains(sshResult.Stderr, "command not found") { sudoPresent = true } - sshResult, err = e2essh.SSH("systemctl --version", host, framework.TestContext.Provider) + sshResult, err = e2essh.SSH(ctx, "systemctl --version", host, framework.TestContext.Provider) if err != nil { return fmt.Errorf("Failed to execute command 'systemctl' on host %s with error %v", host, err) } @@ -81,7 +81,7 @@ func restartKubelet(host string) error { } framework.Logf("Restarting kubelet via ssh on host %s with command %s", host, cmd) - result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider) + result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) return fmt.Errorf("couldn't restart kubelet: %v", err) @@ -115,14 +115,14 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup nodeNameList []string nodeInfo *NodeInfo ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) - nodes, err := e2enode.GetReadySchedulableNodes(client) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err) numNodes = len(nodes.Items) if numNodes < 2 { @@ -152,30 +152,30 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) + pod, err := client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{}) framework.ExpectNoError(err) 
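Note that waitForKubeletUp above still sleeps unconditionally between SSH probes, so it only honors ctx inside e2essh.SSH itself. If that loop were to return as soon as the spec is aborted, one possible sketch (hypothetical probe callback, not part of this patch) would reuse the same context-aware polling the patch adopts elsewhere:

```go
package vspheresketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForServiceUp polls a hypothetical health probe until it reports ready,
// returning early if ctx is cancelled; transient probe errors are retried.
func waitForServiceUp(ctx context.Context, probe func(context.Context) (bool, error)) error {
	return wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute,
		func(ctx context.Context) (bool, error) {
			ok, err := probe(ctx)
			if err != nil {
				// e.g. an SSH hiccup: treat as "not ready yet" and retry.
				return false, nil
			}
			return ok, nil
		})
}
```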
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod) ginkgo.By("Waiting for pod to be ready") - gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) - pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pods = append(pods, pod) nodeName := pod.Spec.NodeName ginkgo.By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName)) - expectVolumeToBeAttached(nodeName, volumePath) + expectVolumeToBeAttached(ctx, nodeName, volumePath) } ginkgo.By("Restarting kubelet on instance node") instanceAddress := framework.APIAddress() + ":22" - err := restartKubelet(instanceAddress) + err := restartKubelet(ctx, instanceAddress) framework.ExpectNoError(err, "Unable to restart kubelet on instance node") ginkgo.By("Verifying the kubelet on instance node is up") - err = waitForKubeletUp(instanceAddress) + err = waitForKubeletUp(ctx, instanceAddress) framework.ExpectNoError(err) for i, pod := range pods { @@ -183,14 +183,14 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup nodeName := pod.Spec.NodeName ginkgo.By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName)) - expectVolumeToBeAttached(nodeName, volumePath) + expectVolumeToBeAttached(ctx, nodeName, volumePath) ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) - err = e2epod.DeletePodWithWait(client, pod) + err = e2epod.DeletePodWithWait(ctx, client, pod) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) - err = waitForVSphereDiskToDetach(volumePath, nodeName) + err = waitForVSphereDiskToDetach(ctx, volumePath, nodeName) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath)) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go index b8c8eff664a..b065681f545 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go @@ -41,19 +41,19 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] err error ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(err) workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR") }) ginkgo.It("node unregister", func(ctx context.Context) { ginkgo.By("Get total Ready nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) if len(nodeList.Items) < 2 { framework.Failf("At least 2 nodes are required for this test, got instead: %v", len(nodeList.Items)) @@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] // Find VM .vmx file path, host, resource pool. 
// They are required to register a node VM to VC - vmxFilePath := getVMXFilePath(vmObject) + vmxFilePath := getVMXFilePath(ctx, vmObject) vmHost, err := vmObject.HostSystem(ctx) framework.ExpectNoError(err) @@ -77,13 +77,13 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] // Unregister Node VM ginkgo.By("Unregister a node VM") - unregisterNodeVM(nodeVM.ObjectMeta.Name, vmObject) + unregisterNodeVM(ctx, nodeVM.ObjectMeta.Name, vmObject) // Ready nodes should be 1 less ginkgo.By("Verifying the ready node counts") - framework.ExpectEqual(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1), true, "Unable to verify expected ready node count") + framework.ExpectEqual(verifyReadyNodeCount(ctx, f.ClientSet, totalNodesCount-1), true, "Unable to verify expected ready node count") - nodeList, err = e2enode.GetReadySchedulableNodes(client) + nodeList, err = e2enode.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err) var nodeNameList []string @@ -94,13 +94,13 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] // Register Node VM ginkgo.By("Register back the node VM") - registerNodeVM(nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost) + registerNodeVM(ctx, nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost) // Ready nodes should be equal to earlier count ginkgo.By("Verifying the ready node counts") - framework.ExpectEqual(verifyReadyNodeCount(f.ClientSet, totalNodesCount), true, "Unable to verify expected ready node count") + framework.ExpectEqual(verifyReadyNodeCount(ctx, f.ClientSet, totalNodesCount), true, "Unable to verify expected ready node count") - nodeList, err = e2enode.GetReadySchedulableNodes(client) + nodeList, err = e2enode.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err) nodeNameList = nodeNameList[:0] @@ -114,6 +114,6 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] scParameters := make(map[string]string) storagePolicy := GetAndExpectStringEnvVar("VSPHERE_SPBM_GOLD_POLICY") scParameters[SpbmStoragePolicy] = storagePolicy - invokeValidPolicyTest(f, client, namespace, scParameters) + invokeValidPolicyTest(ctx, f, client, namespace, scParameters) }) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index 71182193401..309f4a62c32 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -53,13 +53,13 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", namespace string ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) - nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) if len(nodeList.Items) < 2 { framework.Failf("At least 2 nodes are required for this test, got instead: %v", len(nodeList.Items)) @@ -84,36 +84,36 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", ginkgo.It("verify volume status after node power off", func(ctx 
context.Context) { ginkgo.By("Creating a Storage Class") storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, storageClassSpec, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) - pvclaim, err := e2epv.CreatePVC(client, namespace, pvclaimSpec) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, pvclaimSpec) framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err)) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) ginkgo.By("Waiting for PVC to be in bound phase") pvclaims := []*v1.PersistentVolumeClaim{pvclaim} - pvs, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision) + pvs, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err)) volumePath := pvs[0].Spec.VsphereVolume.VolumePath ginkgo.By("Creating a Deployment") - deployment, err := e2edeployment.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") + deployment, err := e2edeployment.CreateDeployment(ctx, client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AppsV1().Deployments(namespace).Delete), deployment.Name, metav1.DeleteOptions{}) ginkgo.By("Get pod from the deployment") - podList, err := e2edeployment.GetPodsForDeployment(client, deployment) + podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment) framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployment with err: %v", err)) gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) pod := podList.Items[0] node1 := pod.Spec.NodeName ginkgo.By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) - isAttached, err := diskIsAttached(volumePath, node1) + isAttached, err := diskIsAttached(ctx, volumePath, node1) framework.ExpectNoError(err) if !isAttached { framework.Failf("Volume: %s is not attached to the node: %v", volumePath, node1) @@ -131,15 +131,15 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", framework.ExpectNoError(err, "Unable to power off the node") // Waiting for the pod to be failed over to a different node - node2, err := waitForPodToFailover(client, deployment, node1) + node2, err := waitForPodToFailover(ctx, client, deployment, node1) framework.ExpectNoError(err, "Pod did not fail over to a different node") ginkgo.By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) - err = waitForVSphereDiskToAttach(volumePath, node2) + err = waitForVSphereDiskToAttach(ctx, volumePath, node2) framework.ExpectNoError(err, "Disk is not attached to the node") ginkgo.By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) - err = waitForVSphereDiskToDetach(volumePath, node1) 
+ err = waitForVSphereDiskToDetach(ctx, volumePath, node1) framework.ExpectNoError(err, "Disk is not detached from the node") ginkgo.By(fmt.Sprintf("Power on the previous node: %v", node1)) @@ -150,14 +150,14 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", }) // Wait until the pod failed over to a different node, or time out after 3 minutes -func waitForPodToFailover(client clientset.Interface, deployment *appsv1.Deployment, oldNode string) (string, error) { +func waitForPodToFailover(ctx context.Context, client clientset.Interface, deployment *appsv1.Deployment, oldNode string) (string, error) { var ( timeout = 3 * time.Minute pollTime = 10 * time.Second ) - waitErr := wait.Poll(pollTime, timeout, func() (bool, error) { - currentNode, err := getNodeForDeployment(client, deployment) + waitErr := wait.PollWithContext(ctx, pollTime, timeout, func(ctx context.Context) (bool, error) { + currentNode, err := getNodeForDeployment(ctx, client, deployment) if err != nil { return true, err } @@ -178,12 +178,12 @@ func waitForPodToFailover(client clientset.Interface, deployment *appsv1.Deploym return "", fmt.Errorf("pod did not fail over from %q: %v", oldNode, waitErr) } - return getNodeForDeployment(client, deployment) + return getNodeForDeployment(ctx, client, deployment) } // getNodeForDeployment returns node name for the Deployment -func getNodeForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (string, error) { - podList, err := e2edeployment.GetPodsForDeployment(client, deployment) +func getNodeForDeployment(ctx context.Context, client clientset.Interface, deployment *appsv1.Deployment) (string, error) { + podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment) if err != nil { return "", err } diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index 13197e1c4d4..5629428f5aa 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -65,12 +65,12 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { err error volumeOpsScale int ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty()) + gomega.Expect(GetReadySchedulableNodeInfos(ctx)).NotTo(gomega.BeEmpty()) if scale := os.Getenv("VOLUME_OPS_SCALE"); scale != "" { volumeOpsScale, err = strconv.Atoi(scale) framework.ExpectNoError(err) @@ -79,13 +79,13 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { } pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Deleting PVCs") for _, claim := range pvclaims { - e2epv.DeletePersistentVolumeClaim(client, claim.Name, namespace) + _ = e2epv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) } ginkgo.By("Deleting StorageClass") - err = client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -94,34 +94,34 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { ginkgo.By("Creating Storage Class") scParameters := 
make(map[string]string) scParameters["diskformat"] = "thin" - storageclass, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("thinsc", scParameters, nil, ""), metav1.CreateOptions{}) + storageclass, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("thinsc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating PVCs using the Storage Class") count := 0 for count < volumeOpsScale { - pvclaims[count], err = e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaims[count], err = e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) count++ } ginkgo.By("Waiting for all claims to be in bound phase") - persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision) + persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) framework.ExpectNoError(err) ginkgo.By("Creating pod to attach PVs to the node") - pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "") + pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) ginkgo.By("Verify all volumes are accessible and available in the pod") - verifyVSphereVolumesAccessible(client, pod, persistentvolumes) + verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) ginkgo.By("Deleting pod") - framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod)) ginkgo.By("Waiting for volumes to be detached from the node") for _, pv := range persistentvolumes { - waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) + framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) } }) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index a44fdb5d647..c1b98c1b6a8 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { iterations int ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -84,7 +84,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { policyName = GetAndExpectStringEnvVar(SPBMPolicyName) datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) - nodes, err := e2enode.GetReadySchedulableNodes(client) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err) gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items)) @@ -98,14 +98,14 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { }) ginkgo.It("vcp performance tests", func(ctx context.Context) { - scList := getTestStorageClasses(client, policyName, datastoreName) + scList := getTestStorageClasses(ctx, client, policyName, datastoreName) for _, sc := range scList { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{}) } sumLatency := make(map[string]float64) for i := 0; i < iterations; i++ { - latency := 
invokeVolumeLifeCyclePerformance(f, client, namespace, scList, volumesPerPod, volumeCount, nodeSelectorList) + latency := invokeVolumeLifeCyclePerformance(ctx, f, client, namespace, scList, volumesPerPod, volumeCount, nodeSelectorList) for key, val := range latency { sumLatency[key] += val } @@ -121,7 +121,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { }) }) -func getTestStorageClasses(client clientset.Interface, policyName, datastoreName string) []*storagev1.StorageClass { +func getTestStorageClasses(ctx context.Context, client clientset.Interface, policyName, datastoreName string) []*storagev1.StorageClass { const ( storageclass1 = "sc-default" storageclass2 = "sc-vsan" @@ -137,23 +137,23 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName var err error switch scname { case storageclass1: - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{}) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[PolicyHostFailuresToTolerate] = "1" - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{}) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{}) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "") - sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec, metav1.CreateOptions{}) + sc, err = client.StorageV1().StorageClasses().Create(ctx, scWithDatastoreSpec, metav1.CreateOptions{}) } gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) @@ -163,7 +163,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName } // invokeVolumeLifeCyclePerformance peforms full volume life cycle management and records latency for each operation -func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumesPerPod int, volumeCount int, nodeSelectorList []*NodeSelector) (latency map[string]float64) { +func invokeVolumeLifeCyclePerformance(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumesPerPod int, volumeCount int, nodeSelectorList []*NodeSelector) (latency map[string]float64) { var ( totalpvclaims [][]*v1.PersistentVolumeClaim totalpvs [][]*v1.PersistentVolume @@ -179,14 +179,14 @@ func invokeVolumeLifeCyclePerformance(f 
*framework.Framework, client clientset.I var pvclaims []*v1.PersistentVolumeClaim for j := 0; j < volumesPerPod; j++ { currsc := sc[((i*numPods)+j)%len(sc)] - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc)) framework.ExpectNoError(err) pvclaims = append(pvclaims, pvclaim) } totalpvclaims = append(totalpvclaims, pvclaims) } for _, pvclaims := range totalpvclaims { - persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision) + persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) framework.ExpectNoError(err) totalpvs = append(totalpvs, persistentvolumes) } @@ -197,7 +197,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I start = time.Now() for i, pvclaims := range totalpvclaims { nodeSelector := nodeSelectorList[i%len(nodeSelectorList)] - pod, err := e2epod.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") + pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") framework.ExpectNoError(err) totalpods = append(totalpods, pod) @@ -207,13 +207,13 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I latency[AttachOp] = elapsed.Seconds() for i, pod := range totalpods { - verifyVSphereVolumesAccessible(client, pod, totalpvs[i]) + verifyVSphereVolumesAccessible(ctx, client, pod, totalpvs[i]) } ginkgo.By("Deleting pods") start = time.Now() for _, pod := range totalpods { - err := e2epod.DeletePodWithWait(client, pod) + err := e2epod.DeletePodWithWait(ctx, client, pod) framework.ExpectNoError(err) } elapsed = time.Since(start) @@ -225,14 +225,14 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I } } - err := waitForVSphereDisksToDetach(nodeVolumeMap) + err := waitForVSphereDisksToDetach(ctx, nodeVolumeMap) framework.ExpectNoError(err) ginkgo.By("Deleting the PVCs") start = time.Now() for _, pvclaims := range totalpvclaims { for _, pvc := range pvclaims { - err = e2epv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) framework.ExpectNoError(err) } } diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index faeba671f20..0fa945ea8ef 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -54,13 +54,13 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { nodeInfo *NodeInfo vsp *VSphere ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) - node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) + node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(ctx, c, ns) ginkgo.DeferCleanup(func() { if len(node1KeyValueLabel) > 0 { 
e2enode.RemoveLabelOffNode(c, node1Name, NodeLabelKey) @@ -99,24 +99,24 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { ginkgo.It("should create and delete pod with the same volume source on the same worker node", func(ctx context.Context) { var volumeFiles []string - pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) + pod := createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns) volumeFiles = append(volumeFiles, newEmptyFileName) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) - deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) + deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) ginkgo.By(fmt.Sprintf("Creating pod on the same node: %v", node1Name)) - pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) + pod = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) volumeFiles = append(volumeFiles, newEmptyFileName) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) - deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) + deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) }) /* @@ -139,23 +139,23 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func(ctx context.Context) { var volumeFiles []string - pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) + pod := createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns) volumeFiles = append(volumeFiles, newEmptyFileName) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) - deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) + deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) ginkgo.By(fmt.Sprintf("Creating pod on the another node: %v", node2Name)) - pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths) + pod = createPodWithVolumeAndNodeSelector(ctx, c, ns, node2Name, node2KeyValueLabel, volumePaths) newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) volumeFiles = append(volumeFiles, newEmptyFileName) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) - deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths) + deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node2Name, volumePaths) }) /* @@ -179,7 
+179,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { volumePaths = append(volumePaths, volumePath) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) - pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) + pod := createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod volumeFiles := []string{ @@ -187,9 +187,9 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { fmt.Sprintf("/mnt/volume2/%v_1.txt", ns), } createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) - deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) + deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) - pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) + pod = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod newEmptyFilesNames := []string{ @@ -227,7 +227,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { volumePaths = append(volumePaths, volumePath) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) - pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) + pod := createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod @@ -236,10 +236,10 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { fmt.Sprintf("/mnt/volume2/%v_1.txt", ns), } createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) - deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) + deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) - pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) + pod = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod newEmptyFileNames := []string{ @@ -249,7 +249,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { volumeFiles = append(volumeFiles, newEmptyFileNames[0]) volumeFiles = append(volumeFiles, newEmptyFileNames[1]) createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles) - deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) + deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) }) /* @@ -278,11 +278,11 @@ var _ = utils.SIGDescribe("Volume Placement 
[Feature:vsphere]", func() { defer func() { ginkgo.By("clean up undeleted pods") - framework.ExpectNoError(e2epod.DeletePodWithWait(c, podA), "defer: Failed to delete pod ", podA.Name) - framework.ExpectNoError(e2epod.DeletePodWithWait(c, podB), "defer: Failed to delete pod ", podB.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, podA), "defer: Failed to delete pod ", podA.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, podB), "defer: Failed to delete pod ", podB.Name) ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) for _, volumePath := range volumePaths { - framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name)) + framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volumePath, node1Name)) } }() @@ -296,10 +296,10 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { for index := 0; index < 5; index++ { ginkgo.By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0])) - podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA) + podA = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA) ginkgo.By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0])) - podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB) + podB = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB) podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1) podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1) @@ -320,15 +320,15 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...) 
ginkgo.By("Deleting pod-A") - framework.ExpectNoError(e2epod.DeletePodWithWait(c, podA), "Failed to delete pod ", podA.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, podA), "Failed to delete pod ", podA.Name) ginkgo.By("Deleting pod-B") - framework.ExpectNoError(e2epod.DeletePodWithWait(c, podB), "Failed to delete pod ", podB.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, podB), "Failed to delete pod ", podB.Name) } }) }) -func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) { - nodes, err := e2enode.GetBoundedReadySchedulableNodes(client, 2) +func testSetupVolumePlacement(ctx context.Context, client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) { + nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, client, 2) framework.ExpectNoError(err) if len(nodes.Items) < 2 { e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items)) @@ -347,20 +347,20 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel } -func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod { +func createPodWithVolumeAndNodeSelector(ctx context.Context, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod { var pod *v1.Pod var err error ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil) - pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) + pod, err = client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be ready") - gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) ginkgo.By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) for _, volumePath := range volumePaths { - isAttached, err := diskIsAttached(volumePath, nodeName) + isAttached, err := diskIsAttached(ctx, volumePath, nodeName) framework.ExpectNoError(err) if !isAttached { framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName) @@ -379,12 +379,12 @@ func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfile verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...) 
} -func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) { +func deletePodAndWaitForVolumeToDetach(ctx context.Context, f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) { ginkgo.By("Deleting pod") - framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod), "Failed to delete pod ", pod.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod), "Failed to delete pod ", pod.Name) ginkgo.By("Waiting for volume to be detached from the node") for _, volumePath := range volumePaths { - framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName)) + framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volumePath, nodeName)) } } diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index bad5fccb530..2d3da8272f7 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -73,16 +73,16 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs vcNodesMap map[string][]node ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // Requires SSH access to vCenter. e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) - nodes, err := e2enode.GetReadySchedulableNodes(client) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err) numNodes := len(nodes.Items) @@ -123,19 +123,19 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil) - pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) + pod, err := client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for pod %d to be ready", i)) - gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) - pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pods = append(pods, pod) nodeName := pod.Spec.NodeName ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) - expectVolumeToBeAttached(nodeName, volumePath) + expectVolumeToBeAttached(ctx, nodeName, volumePath) ginkgo.By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i)) filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10)) @@ -148,17 +148,17 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs ginkgo.By("Stopping vpxd on the vCenter host") vcAddress := vcHost + ":22" - err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress) + err := 
invokeVCenterServiceControl(ctx, "stop", vpxdServiceName, vcAddress) framework.ExpectNoError(err, "Unable to stop vpxd on the vCenter host") expectFilesToBeAccessible(namespace, pods, filePaths) expectFileContentsToMatch(namespace, pods, filePaths, fileContents) ginkgo.By("Starting vpxd on the vCenter host") - err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress) + err = invokeVCenterServiceControl(ctx, "start", vpxdServiceName, vcAddress) framework.ExpectNoError(err, "Unable to start vpxd on the vCenter host") - expectVolumesToBeAttached(pods, volumePaths) + expectVolumesToBeAttached(ctx, pods, volumePaths) expectFilesToBeAccessible(namespace, pods, filePaths) expectFileContentsToMatch(namespace, pods, filePaths, fileContents) @@ -168,11 +168,11 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs volumePath := volumePaths[i] ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) - err = e2epod.DeletePodWithWait(client, pod) + err = e2epod.DeletePodWithWait(ctx, client, pod) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) - err = waitForVSphereDiskToDetach(volumePath, nodeName) + err = waitForVSphereDiskToDetach(ctx, volumePath, nodeName) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath)) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 09601a1f272..3d3c485923b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -85,7 +85,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp policyName string tagPolicy string ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy) framework.Logf("framework: %+v", f) scParameters = make(map[string]string) - _, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + _, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) }) @@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) - invokeValidPolicyTest(f, client, namespace, scParameters) + invokeValidPolicyTest(ctx, f, client, namespace, scParameters) }) // Valid policy. @@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[PolicyDiskStripes] = "1" scParameters[PolicyObjectSpaceReservation] = "30" framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) - invokeValidPolicyTest(f, client, namespace, scParameters) + invokeValidPolicyTest(ctx, f, client, namespace, scParameters) }) // Valid policy. 
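The hunks above and below show the core pattern of this patch: Ginkgo v2 setup and spec nodes may take a context.Context parameter, which Ginkgo cancels when the spec is interrupted or times out, and that ctx is then threaded into every API call and helper instead of context.TODO(). For reference only (not part of the patch), a minimal standalone sketch of that pattern; the listPods helper and the package-level client variable are illustrative assumptions, not names from the tree:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// client is assumed to be initialized elsewhere (in the e2e suite it comes from f.ClientSet).
var client kubernetes.Interface

var _ = ginkgo.Describe("context-aware spec", func() {
	ginkgo.BeforeEach(func(ctx context.Context) {
		// The per-spec ctx replaces context.TODO(), so setup returns promptly
		// when the test run is aborted.
		_, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	})

	ginkgo.It("passes ctx down to helpers", func(ctx context.Context) {
		gomega.Expect(listPods(ctx, client, "default")).To(gomega.Succeed())
	})
})

// listPods is a hypothetical helper; like the helpers converted in this patch,
// it takes ctx as its first parameter and hands it to the client-go call.
func listPods(ctx context.Context, c kubernetes.Interface, ns string) error {
	_, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
	return err
}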
@@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = vsanDatastore framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) - invokeValidPolicyTest(f, client, namespace, scParameters) + invokeValidPolicyTest(ctx, f, client, namespace, scParameters) }) // Valid policy. @@ -132,7 +132,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) - invokeValidPolicyTest(f, client, namespace, scParameters) + invokeValidPolicyTest(ctx, f, client, namespace, scParameters) }) // Invalid VSAN storage capabilities parameters. @@ -141,7 +141,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal scParameters[PolicyDiskStripes] = StripeWidthCapabilityVal framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) - err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) + err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) framework.ExpectError(err) errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume" if !strings.Contains(err.Error(), errorMsg) { @@ -156,7 +156,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[PolicyDiskStripes] = DiskStripesCapabilityInvalidVal scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) - err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) + err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) framework.ExpectError(err) errorMsg := "Invalid value for " + PolicyDiskStripes + "." if !strings.Contains(err.Error(), errorMsg) { @@ -170,7 +170,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal)) scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) - err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) + err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) framework.ExpectError(err) errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "." if !strings.Contains(err.Error(), errorMsg) { @@ -186,7 +186,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = vmfsDatastore framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) - err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) + err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) framework.ExpectError(err) errorMsg := "The specified datastore: \\\"" + vmfsDatastore + "\\\" is not a VSAN datastore. " + "The policy parameters will work only with VSAN Datastore." 
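Polling helpers converted earlier in this section (waitForPodToFailover, waitForVSphereDiskToDetach) follow the same shape: wait.Poll becomes wait.PollWithContext, so the loop stops as soon as the spec's ctx is cancelled rather than always running to its own timeout. A minimal sketch of that conversion, for reference; pollUntilReady and checkReady are hypothetical stand-ins, not helpers from the tree:

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollUntilReady polls checkReady every 10s for up to 3m, honouring ctx cancellation.
// checkReady stands in for an API-backed check such as getNodeForDeployment above.
func pollUntilReady(ctx context.Context, checkReady func(context.Context) (bool, error)) error {
	const (
		pollTime = 10 * time.Second
		timeout  = 3 * time.Minute
	)
	return wait.PollWithContext(ctx, pollTime, timeout, func(ctx context.Context) (bool, error) {
		return checkReady(ctx)
	})
}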
@@ -200,7 +200,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[SpbmStoragePolicy] = policyName scParameters[DiskFormat] = ThinDisk framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters) - invokeValidPolicyTest(f, client, namespace, scParameters) + invokeValidPolicyTest(ctx, f, client, namespace, scParameters) }) ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func(ctx context.Context) { @@ -209,9 +209,9 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[Datastore] = vsanDatastore framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters) kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName) - controlPlaneNode, err := getControlPlaneNode(client) + controlPlaneNode, err := getControlPlaneNode(ctx, client) framework.ExpectNoError(err) - invokeStaleDummyVMTestWithStoragePolicy(client, controlPlaneNode, namespace, kubernetesClusterName, scParameters) + invokeStaleDummyVMTestWithStoragePolicy(ctx, client, controlPlaneNode, namespace, kubernetesClusterName, scParameters) }) ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func(ctx context.Context) { @@ -220,7 +220,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[Datastore] = vsanDatastore scParameters[DiskFormat] = ThinDisk framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters) - err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) + err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) framework.ExpectError(err) errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\"" if !strings.Contains(err.Error(), errorMsg) { @@ -233,7 +233,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[SpbmStoragePolicy] = BronzeStoragePolicy scParameters[DiskFormat] = ThinDisk framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters) - err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) + err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) framework.ExpectError(err) errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\" if !strings.Contains(err.Error(), errorMsg) { @@ -248,7 +248,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal scParameters[DiskFormat] = ThinDisk framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters) - err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) + err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) framework.ExpectError(err) errorMsg := "Cannot specify storage policy capabilities along with storage policy name. 
Please specify only one" if !strings.Contains(err.Error(), errorMsg) { @@ -257,77 +257,77 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) }) -func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { +func invokeValidPolicyTest(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for claim to be in bound phase") - persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision) + persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) framework.ExpectNoError(err) ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node - pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "") + pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) ginkgo.By("Verify the volume is accessible and available in the pod") - verifyVSphereVolumesAccessible(client, pod, persistentvolumes) + verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) ginkgo.By("Deleting pod") - e2epod.DeletePodWithWait(client, pod) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod)) ginkgo.By("Waiting for volumes to be detached from the node") - waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) + framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) } -func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { +func invokeInvalidPolicyTestNeg(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string) error { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) 
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) ginkgo.By("Waiting for claim to be in bound phase") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(context.TODO(), metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) } // invokeStaleDummyVMTestWithStoragePolicy assumes control plane node is present on the datacenter specified in the workspace section of vsphere.conf file. // With in-tree VCP, when the volume is created using storage policy, shadow (dummy) VM is getting created and deleted to apply SPBM policy on the volume. -func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, controlPlaneNode string, namespace string, clusterName string, scParameters map[string]string) { +func invokeStaleDummyVMTestWithStoragePolicy(ctx context.Context, client clientset.Interface, controlPlaneNode string, namespace string, clusterName string, scParameters map[string]string) { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Expect claim to fail provisioning volume") - _, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute) + _, err = e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, 2*time.Minute) framework.ExpectError(err) - updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{}) + updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, 
pvclaim.Name, namespace) @@ -346,11 +346,11 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, control } } -func getControlPlaneNode(client clientset.Interface) (string, error) { +func getControlPlaneNode(ctx context.Context, client clientset.Interface) (string, error) { regKubeScheduler := regexp.MustCompile("kube-scheduler-.*") regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*") - podList, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{}) + podList, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{}) if err != nil { return "", err } diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index ed295fc9509..27863631b7b 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -105,11 +105,11 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { zoneD string invalidZone string ) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet - e2eskipper.SkipUnlessMultizone(client) + e2eskipper.SkipUnlessMultizone(ctx, client) namespace = f.Namespace.Name vsanDatastore1 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore1) vsanDatastore2 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore2) @@ -123,27 +123,27 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { invalidZone = GetAndExpectStringEnvVar(VCPInvalidZone) scParameters = make(map[string]string) zones = make([]string, 0) - _, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + _, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) }) ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) zones = append(zones, zoneA) - verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones) + verifyPVZoneLabels(ctx, client, f.Timeouts, namespace, nil, zones) }) ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) - verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones) + verifyPVZoneLabels(ctx, client, f.Timeouts, namespace, nil, zones) }) ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", invalidZone)) zones = append(zones, invalidZone) - err := verifyPVCCreationFails(client, namespace, nil, zones, "") + err := verifyPVCCreationFails(ctx, client, namespace, nil, zones, "") framework.ExpectError(err) errorMsg := "Failed to find a shared datastore matching zone [" + invalidZone + "]" if !strings.Contains(err.Error(), errorMsg) { @@ -154,28 +154,28 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) zones = 
append(zones, zoneA) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, nil, zones, "") }) ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, nil, zones, "") }) ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "") }) ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneC) - err := verifyPVCCreationFails(client, namespace, scParameters, zones, "") + err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "") errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -186,21 +186,21 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneA) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "") }) ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneB) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "") }) ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy)) scParameters[SpbmStoragePolicy] = nonCompatPolicy zones = append(zones, zoneA) - err := verifyPVCCreationFails(client, namespace, scParameters, zones, "") + err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "") errorMsg := "No compatible datastores found that satisfy the storage policy requirements" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ 
-212,7 +212,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "") }) ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func(ctx context.Context) { @@ -220,7 +220,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { scParameters[SpbmStoragePolicy] = nonCompatPolicy scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) - err := verifyPVCCreationFails(client, namespace, scParameters, zones, "") + err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "") errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + nonCompatPolicy + "\\\"." if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -232,7 +232,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore2 zones = append(zones, zoneC) - err := verifyPVCCreationFails(client, namespace, scParameters, zones, "") + err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "") errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -241,7 +241,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with no zones")) - err := verifyPVCCreationFails(client, namespace, nil, nil, "") + err := verifyPVCCreationFails(ctx, client, namespace, nil, nil, "") errorMsg := "No shared datastores found in the Kubernetes cluster" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -251,7 +251,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 - err := verifyPVCCreationFails(client, namespace, scParameters, nil, "") + err := verifyPVCCreationFails(ctx, client, namespace, scParameters, nil, "") errorMsg := "No shared datastores found in the Kubernetes cluster" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -261,7 +261,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy - err := verifyPVCCreationFails(client, namespace, scParameters, nil, "") + err := verifyPVCCreationFails(ctx, client, namespace, scParameters, nil, "") errorMsg := "No shared datastores found in 
the Kubernetes cluster" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -272,7 +272,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore1 - err := verifyPVCCreationFails(client, namespace, scParameters, nil, "") + err := verifyPVCCreationFails(ctx, client, namespace, scParameters, nil, "") errorMsg := "No shared datastores found in the Kubernetes cluster" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -282,7 +282,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneC)) zones = append(zones, zoneC) - err := verifyPVCCreationFails(client, namespace, nil, zones, "") + err := verifyPVCCreationFails(ctx, client, namespace, nil, zones, "") errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -293,7 +293,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC)) zones = append(zones, zoneA) zones = append(zones, zoneC) - err := verifyPVCCreationFails(client, namespace, nil, zones, "") + err := verifyPVCCreationFails(ctx, client, namespace, nil, zones, "") errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -304,7 +304,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", PolicyHostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA)) scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal zones = append(zones, zoneA) - err := verifyPVCCreationFails(client, namespace, scParameters, zones, "") + err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "") errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "." 
if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -317,47 +317,47 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "") }) ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD)) zones = append(zones, zoneD) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "") }) ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore)) scParameters[Datastore] = localDatastore zones = append(zones, zoneD) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "") }) ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer) + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer) }) ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneA) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer) + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer) }) ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) - verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer) + verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, nil, zones, 
storagev1.VolumeBindingWaitForFirstConsumer) }) ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and following zones :%s and %s", zoneA, zoneC)) zones = append(zones, zoneA) zones = append(zones, zoneC) - err := verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client, namespace, nil, zones) + err := verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx, client, namespace, nil, zones) framework.ExpectError(err) errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC if !strings.Contains(err.Error(), errorMsg) { @@ -374,17 +374,17 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { // nodeSelector set as zoneB v1.LabelTopologyZone: zoneB, } - verifyPodSchedulingFails(client, namespace, nodeSelectorMap, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer) + verifyPodSchedulingFails(ctx, client, namespace, nodeSelectorMap, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer) }) }) -func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) +func verifyPVCAndPodCreationSucceeds(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) @@ -394,39 +394,39 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *frame var persistentvolumes []*v1.PersistentVolume // If WaitForFirstConsumer mode, verify pvc binding status after pod creation. For immediate mode, do now. 
if volumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer { - persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision) + persistentvolumes = waitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision) } ginkgo.By("Creating pod to attach PV to the node") - pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "") + pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) if volumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { - persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision) + persistentvolumes = waitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision) } if zones != nil { ginkgo.By("Verify persistent volume was created on the right zone") - verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones) + verifyVolumeCreationOnRightZone(ctx, persistentvolumes, pod.Spec.NodeName, zones) } ginkgo.By("Verify the volume is accessible and available in the pod") - verifyVSphereVolumesAccessible(client, pod, persistentvolumes) + verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) ginkgo.By("Deleting pod") - e2epod.DeletePodWithWait(client, pod) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod)) ginkgo.By("Waiting for volumes to be detached from the node") - waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) + framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) } -func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer), metav1.CreateOptions{}) +func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error { + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) @@ -435,15 +435,15 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I ginkgo.By("Creating a pod") pod := e2epod.MakePod(namespace, nil, pvclaims, false, "") - pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod) ginkgo.By("Waiting for claim to be in bound phase") - err = 
e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(context.TODO(), metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) // Look for PVC ProvisioningFailed event and return the message. @@ -455,20 +455,20 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I return nil } -func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) []*v1.PersistentVolume { +func waitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) []*v1.PersistentVolume { ginkgo.By("Waiting for claim to be in bound phase") - persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeout) + persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeout) framework.ExpectNoError(err) return persistentvolumes } -func verifyPodSchedulingFails(client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) +func verifyPodSchedulingFails(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) @@ -476,46 +476,46 @@ func verifyPodSchedulingFails(client clientset.Interface, namespace string, node pvclaims = append(pvclaims, pvclaim) ginkgo.By("Creating a pod") - pod, err := e2epod.CreateUnschedulablePod(client, namespace, nodeSelector, pvclaims, false, "") + pod, err := e2epod.CreateUnschedulablePod(ctx, client, namespace, nodeSelector, pvclaims, false, "") framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod) } -func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) +func 
verifyPVCCreationFails(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error { + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) ginkgo.By("Waiting for claim to be in bound phase") - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(context.TODO(), metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) framework.Logf("Failure message : %+q", eventList.Items[0].Message) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) } -func verifyPVZoneLabels(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string) { - storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{}) +func verifyPVZoneLabels(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string) { + storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the storage class") - pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) ginkgo.By("Waiting for claim to be in bound phase") - persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision) + persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision) framework.ExpectNoError(err) ginkgo.By("Verify zone information is present in the volume labels") diff --git a/test/e2e/suites.go b/test/e2e/suites.go index d2d4ccaddc2..dc26c3c733f 100644 --- a/test/e2e/suites.go +++ b/test/e2e/suites.go @@ 
-17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "fmt" "os" "path" @@ -28,23 +29,23 @@ import ( ) // AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite -func AfterSuiteActions() { +func AfterSuiteActions(ctx context.Context) { // Run only Ginkgo on node 1 framework.Logf("Running AfterSuite actions on node 1") if framework.TestContext.ReportDir != "" { framework.CoreDump(framework.TestContext.ReportDir) } if framework.TestContext.GatherSuiteMetricsAfterTest { - if err := gatherTestSuiteMetrics(); err != nil { + if err := gatherTestSuiteMetrics(ctx); err != nil { framework.Logf("Error gathering metrics: %v", err) } } - if framework.TestContext.NodeKiller.Enabled { - close(framework.TestContext.NodeKiller.NodeKillerStopCh) + if framework.TestContext.NodeKiller.NodeKillerStop != nil { + framework.TestContext.NodeKiller.NodeKillerStop() } } -func gatherTestSuiteMetrics() error { +func gatherTestSuiteMetrics(ctx context.Context) error { framework.Logf("Gathering metrics") config, err := framework.LoadConfig() if err != nil { @@ -56,12 +57,12 @@ func gatherTestSuiteMetrics() error { } // Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally). - grabber, err := e2emetrics.NewMetricsGrabber(c, nil, config, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false) + grabber, err := e2emetrics.NewMetricsGrabber(ctx, c, nil, config, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false) if err != nil { return fmt.Errorf("failed to create MetricsGrabber: %v", err) } - received, err := grabber.Grab() + received, err := grabber.Grab(ctx) if err != nil { return fmt.Errorf("failed to grab metrics: %v", err) } diff --git a/test/e2e/upgrades/apps/cassandra.go b/test/e2e/upgrades/apps/cassandra.go index 9c267696043..1b1e83dadbc 100644 --- a/test/e2e/upgrades/apps/cassandra.go +++ b/test/e2e/upgrades/apps/cassandra.go @@ -78,7 +78,7 @@ func cassandraKubectlCreate(ns, file string) { // the upgrade. // It waits for the system to stabilize before adding two users to verify // connectivity. -func (t *CassandraUpgradeTest) Setup(f *framework.Framework) { +func (t *CassandraUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name statefulsetPoll := 30 * time.Second statefulsetTimeout := 10 * time.Minute @@ -87,14 +87,14 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) { cassandraKubectlCreate(ns, "pdb.yaml") ginkgo.By("Creating a Cassandra StatefulSet") - e2estatefulset.CreateStatefulSet(f.ClientSet, cassandraManifestPath, ns) + e2estatefulset.CreateStatefulSet(ctx, f.ClientSet, cassandraManifestPath, ns) ginkgo.By("Creating a cassandra-test-server deployment") cassandraKubectlCreate(ns, "tester.yaml") ginkgo.By("Getting the ingress IPs from the services") - err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { - if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { + err := wait.PollImmediateWithContext(ctx, statefulsetPoll, statefulsetTimeout, func(ctx context.Context) (bool, error) { + if t.ip = t.getServiceIP(ctx, f, ns, "test-server"); t.ip == "" { return false, nil } if _, err := t.listUsers(); err != nil { @@ -159,8 +159,8 @@ func (t *CassandraUpgradeTest) addUser(name string) error { } // getServiceIP is a helper method to extract the Ingress IP from the service. 
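The Cassandra Setup hunk above shows the polling conversion that recurs throughout this patch: wait.PollImmediate becomes wait.PollImmediateWithContext, the condition closure gains a ctx parameter, and that ctx is handed to every API call so an aborted test stops waiting immediately instead of running to the timeout. A minimal stand-alone sketch of the pattern (the helper name, service name and timeouts are illustrative only):

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForIngressIP is a hypothetical helper demonstrating context-aware
// polling: the ctx passed to the condition derives from the Ginkgo spec
// context, so cancelling the test cancels both the wait loop and the Get.
func waitForIngressIP(ctx context.Context, c clientset.Interface, ns, svcName string) (string, error) {
	var ip string
	err := wait.PollImmediateWithContext(ctx, 30*time.Second, 10*time.Minute,
		func(ctx context.Context) (bool, error) {
			svc, err := c.CoreV1().Services(ns).Get(ctx, svcName, metav1.GetOptions{})
			if err != nil {
				return false, nil // tolerate transient errors and keep polling
			}
			if len(svc.Status.LoadBalancer.Ingress) == 0 {
				return false, nil
			}
			ip = svc.Status.LoadBalancer.Ingress[0].IP
			return true, nil
		})
	return ip, err
}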
-func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{}) +func (t *CassandraUpgradeTest) getServiceIP(ctx context.Context, f *framework.Framework, ns, svcName string) string { + svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, svcName, metav1.GetOptions{}) framework.ExpectNoError(err) ingress := svc.Status.LoadBalancer.Ingress if len(ingress) == 0 { @@ -174,7 +174,7 @@ func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName // from the db. Each attempt is tallied and at the end we verify if the success // ratio is over a certain threshold (0.75). We also verify that we get // at least the same number of rows back as we successfully wrote. -func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *CassandraUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { ginkgo.By("Continuously polling the database during upgrade.") var ( success, failures, writeAttempts, lastUserCount int @@ -219,7 +219,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{} } // Teardown does one final check of the data's availability. -func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) { +func (t *CassandraUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { users, err := t.listUsers() framework.ExpectNoError(err) gomega.Expect(len(users)).To(gomega.BeNumerically(">=", t.successfulWrites), "len(users) is too small") diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go index 646ae218051..f5074af437d 100644 --- a/test/e2e/upgrades/apps/daemonsets.go +++ b/test/e2e/upgrades/apps/daemonsets.go @@ -40,7 +40,7 @@ type DaemonSetUpgradeTest struct { func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade" } // Setup creates a DaemonSet and verifies that it's running -func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) { +func (t *DaemonSetUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { daemonSetName := "ds1" labelSet := map[string]string{"ds-name": daemonSetName} image := framework.ServeHostnameImage @@ -54,38 +54,38 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a DaemonSet") var err error - if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(context.TODO(), t.daemonSet, metav1.CreateOptions{}); err != nil { + if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(ctx, t.daemonSet, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err) } ginkgo.By("Waiting for DaemonSet pods to become ready") - err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) { - return e2edaemonset.CheckRunningOnAllNodes(f, t.daemonSet) + err = wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) { + return e2edaemonset.CheckRunningOnAllNodes(ctx, f, t.daemonSet) }) framework.ExpectNoError(err) ginkgo.By("Validating the DaemonSet after creation") - t.validateRunningDaemonSet(f) + t.validateRunningDaemonSet(ctx, f) } // Test waits until the upgrade has completed and then verifies that the DaemonSet // is still running -func (t *DaemonSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, 
upgrade upgrades.UpgradeType) { +func (t *DaemonSetUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { ginkgo.By("Waiting for upgrade to complete before re-validating DaemonSet") <-done ginkgo.By("validating the DaemonSet is still running after upgrade") - t.validateRunningDaemonSet(f) + t.validateRunningDaemonSet(ctx, f) } // Teardown cleans up any remaining resources. -func (t *DaemonSetUpgradeTest) Teardown(f *framework.Framework) { +func (t *DaemonSetUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } -func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) { +func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(ctx context.Context, f *framework.Framework) { ginkgo.By("confirming the DaemonSet pods are running on all expected nodes") - res, err := e2edaemonset.CheckRunningOnAllNodes(f, t.daemonSet) + res, err := e2edaemonset.CheckRunningOnAllNodes(ctx, f, t.daemonSet) framework.ExpectNoError(err) if !res { framework.Failf("expected DaemonSet pod to be running on all nodes, it was not") @@ -93,6 +93,6 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) // DaemonSet resource itself should be good ginkgo.By("confirming the DaemonSet resource is in a good state") - err = e2edaemonset.CheckDaemonStatus(f, t.daemonSet.Name) + err = e2edaemonset.CheckDaemonStatus(ctx, f, t.daemonSet.Name) framework.ExpectNoError(err) } diff --git a/test/e2e/upgrades/apps/deployments.go b/test/e2e/upgrades/apps/deployments.go index ba1140ff6ec..818eceff93d 100644 --- a/test/e2e/upgrades/apps/deployments.go +++ b/test/e2e/upgrades/apps/deployments.go @@ -57,7 +57,7 @@ type DeploymentUpgradeTest struct { func (DeploymentUpgradeTest) Name() string { return "[sig-apps] deployment-upgrade" } // Setup creates a deployment and makes sure it has a new and an old replicaset running. 
-func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { +func (t *DeploymentUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { c := f.ClientSet nginxImage := imageutils.GetE2EImage(imageutils.Nginx) @@ -67,7 +67,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns)) d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType) - deployment, err := deploymentClient.Create(context.TODO(), d, metav1.CreateOptions{}) + deployment, err := deploymentClient.Create(ctx, d, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName)) @@ -76,14 +76,14 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName)) rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) framework.ExpectNoError(err) - rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()}) + rsList, err := rsClient.List(ctx, metav1.ListOptions{LabelSelector: rsSelector.String()}) framework.ExpectNoError(err) rss := rsList.Items framework.ExpectEqual(len(rss), 1, "expected one replicaset, got %d", len(rss)) t.oldRSUID = rss[0].UID ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName)) - framework.ExpectNoError(waitForDeploymentRevision(c, deployment, "1")) + framework.ExpectNoError(waitForDeploymentRevision(ctx, c, deployment, "1")) // Trigger a new rollout so that we have some history. ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName)) @@ -96,7 +96,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment)) ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName)) - rsList, err = rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()}) + rsList, err = rsClient.List(ctx, metav1.ListOptions{LabelSelector: rsSelector.String()}) framework.ExpectNoError(err) rss = rsList.Items framework.ExpectEqual(len(rss), 2, "expected 2 replicaset, got %d", len(rss)) @@ -112,13 +112,13 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { } ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName)) - framework.ExpectNoError(waitForDeploymentRevision(c, deployment, "2")) + framework.ExpectNoError(waitForDeploymentRevision(ctx, c, deployment, "2")) t.oldDeploymentUID = deployment.UID } // Test checks whether the replicasets for a deployment are the same after an upgrade. 
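The DaemonSet and Deployment hunks above, and every other upgrade test touched below, gain the same three context parameters. For readers skimming the per-file hunks, this is the converted shape of an upgrade test in one place; the type itself is a made-up example, only the method signatures mirror the patch:

package example

import (
	"context"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/upgrades"
)

// ExampleUpgradeTest is a hypothetical test showing the post-patch method set.
type ExampleUpgradeTest struct{}

// Name identifies the test in suite output.
func (ExampleUpgradeTest) Name() string { return "[sig-example] example-upgrade" }

// Setup receives the Ginkgo context and must pass it on to every API call.
func (t *ExampleUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {}

// Test blocks until the upgrade finishes, then re-validates, again using ctx.
func (t *ExampleUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
	<-done
}

// Teardown usually relies on namespace deletion; ctx is available if needed.
func (t *ExampleUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) {}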
-func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *DeploymentUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { // Block until upgrade is done ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName)) <-done @@ -128,7 +128,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{ deploymentClient := c.AppsV1().Deployments(ns) rsClient := c.AppsV1().ReplicaSets(ns) - deployment, err := deploymentClient.Get(context.TODO(), deploymentName, metav1.GetOptions{}) + deployment, err := deploymentClient.Get(ctx, deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName)) @@ -137,7 +137,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{ ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName)) rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err) - rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()}) + rsList, err := rsClient.List(ctx, metav1.ListOptions{LabelSelector: rsSelector.String()}) framework.ExpectNoError(err) rss := rsList.Items framework.ExpectEqual(len(rss), 2, "expected 2 replicaset, got %d", len(rss)) @@ -169,14 +169,14 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{ } // Teardown cleans up any remaining resources. -func (t *DeploymentUpgradeTest) Teardown(f *framework.Framework) { +func (t *DeploymentUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } // waitForDeploymentRevision waits for becoming the target revision of a delopyment. -func waitForDeploymentRevision(c clientset.Interface, d *appsv1.Deployment, targetRevision string) error { +func waitForDeploymentRevision(ctx context.Context, c clientset.Interface, d *appsv1.Deployment, targetRevision string) error { err := wait.PollImmediate(poll, pollLongTimeout, func() (bool, error) { - deployment, err := c.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(d.Namespace).Get(ctx, d.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/upgrades/apps/etcd.go b/test/e2e/upgrades/apps/etcd.go index aedb7d3291a..e9008695193 100644 --- a/test/e2e/upgrades/apps/etcd.go +++ b/test/e2e/upgrades/apps/etcd.go @@ -73,7 +73,7 @@ func kubectlCreate(ns, file string) { } // Setup creates etcd statefulset and then verifies that the etcd is writable. 
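One detail in the deployments.go hunk above: waitForDeploymentRevision now accepts ctx and passes it to the Get call, but the surrounding loop is still the context-free wait.PollImmediate, so cancellation only interrupts the API request, not the wait itself. A fully context-aware variant would look roughly like the sketch below; the revision check shown is illustrative, not copied from the elided helper body:

package example

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForDeploymentRevisionCtx sketches how the helper could also cancel the
// poll loop itself by switching to wait.PollImmediateWithContext.
func waitForDeploymentRevisionCtx(ctx context.Context, c clientset.Interface, d *appsv1.Deployment, targetRevision string) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, 5*time.Minute,
		func(ctx context.Context) (bool, error) {
			deployment, err := c.AppsV1().Deployments(d.Namespace).Get(ctx, d.Name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			// "deployment.kubernetes.io/revision" is the standard revision
			// annotation; the real helper's comparison is not shown in the hunk.
			return deployment.Annotations["deployment.kubernetes.io/revision"] == targetRevision, nil
		})
}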
-func (t *EtcdUpgradeTest) Setup(f *framework.Framework) { +func (t *EtcdUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name statefulsetPoll := 30 * time.Second statefulsetTimeout := 10 * time.Minute @@ -82,14 +82,14 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) { kubectlCreate(ns, "pdb.yaml") ginkgo.By("Creating an etcd StatefulSet") - e2estatefulset.CreateStatefulSet(f.ClientSet, manifestPath, ns) + e2estatefulset.CreateStatefulSet(ctx, f.ClientSet, manifestPath, ns) ginkgo.By("Creating an etcd--test-server deployment") kubectlCreate(ns, "tester.yaml") ginkgo.By("Getting the ingress IPs from the services") - err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { - if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { + err := wait.PollImmediateWithContext(ctx, statefulsetPoll, statefulsetTimeout, func(ctx context.Context) (bool, error) { + if t.ip = t.getServiceIP(ctx, f, ns, "test-server"); t.ip == "" { return false, nil } if _, err := t.listUsers(); err != nil { @@ -151,8 +151,8 @@ func (t *EtcdUpgradeTest) addUser(name string) error { return nil } -func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{}) +func (t *EtcdUpgradeTest) getServiceIP(ctx context.Context, f *framework.Framework, ns, svcName string) string { + svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, svcName, metav1.GetOptions{}) framework.ExpectNoError(err) ingress := svc.Status.LoadBalancer.Ingress if len(ingress) == 0 { @@ -162,7 +162,7 @@ func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName strin } // Test waits for upgrade to complete and verifies if etcd is writable. -func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *EtcdUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { ginkgo.By("Continuously polling the database during upgrade.") var ( success, failures, writeAttempts, lastUserCount int @@ -207,7 +207,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg } // Teardown does one final check of the data's availability. -func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) { +func (t *EtcdUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { users, err := t.listUsers() framework.ExpectNoError(err) gomega.Expect(len(users)).To(gomega.BeNumerically(">=", t.successfulWrites), "len(users) is too small") diff --git a/test/e2e/upgrades/apps/job.go b/test/e2e/upgrades/apps/job.go index c922989b575..eb59b4a8cdc 100644 --- a/test/e2e/upgrades/apps/job.go +++ b/test/e2e/upgrades/apps/job.go @@ -43,40 +43,40 @@ type JobUpgradeTest struct { func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" } // Setup starts a Job with a parallelism of 2 and 2 completions running. 
-func (t *JobUpgradeTest) Setup(f *framework.Framework) { +func (t *JobUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { t.namespace = f.Namespace.Name ginkgo.By("Creating a job") t.job = e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6) - job, err := e2ejob.CreateJob(f.ClientSet, t.namespace, t.job) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, t.namespace, t.job) t.job = job framework.ExpectNoError(err) ginkgo.By("Ensuring active pods == parallelism") - err = e2ejob.WaitForJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2) + err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, t.namespace, job.Name, 2) framework.ExpectNoError(err) } // Test verifies that the Jobs Pods are running after the an upgrade -func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *JobUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done ginkgo.By("Ensuring active pods == parallelism") - err := ensureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2) + err := ensureAllJobPodsRunning(ctx, f.ClientSet, t.namespace, t.job.Name, 2) framework.ExpectNoError(err) } // Teardown cleans up any remaining resources. -func (t *JobUpgradeTest) Teardown(f *framework.Framework) { +func (t *JobUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } // ensureAllJobPodsRunning uses c to check in the Job named jobName in ns // is running, returning an error if the expected parallelism is not // satisfied. -func ensureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error { +func ensureAllJobPodsRunning(ctx context.Context, c clientset.Interface, ns, jobName string, parallelism int32) error { label := labels.SelectorFromSet(labels.Set(map[string]string{e2ejob.JobSelectorKey: jobName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) + pods, err := c.CoreV1().Pods(ns).List(ctx, options) if err != nil { return err } diff --git a/test/e2e/upgrades/apps/mysql.go b/test/e2e/upgrades/apps/mysql.go index 6e3d934bc29..862ca715284 100644 --- a/test/e2e/upgrades/apps/mysql.go +++ b/test/e2e/upgrades/apps/mysql.go @@ -74,8 +74,8 @@ func mysqlKubectlCreate(ns, file string) { e2ekubectl.RunKubectlOrDieInput(ns, input, "create", "-f", "-") } -func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{}) +func (t *MySQLUpgradeTest) getServiceIP(ctx context.Context, f *framework.Framework, ns, svcName string) string { + svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, svcName, metav1.GetOptions{}) framework.ExpectNoError(err) ingress := svc.Status.LoadBalancer.Ingress if len(ingress) == 0 { @@ -88,7 +88,7 @@ func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName stri // from the db. It then connects to the db with the write Service and populates the db with a table // and a few entries. Finally, it connects to the db with the read Service, and confirms the data is // available. The db connections are left open to be used later in the test. 
-func (t *MySQLUpgradeTest) Setup(f *framework.Framework) { +func (t *MySQLUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { ns := f.Namespace.Name statefulsetPoll := 30 * time.Second statefulsetTimeout := 10 * time.Minute @@ -97,14 +97,14 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) { mysqlKubectlCreate(ns, "configmap.yaml") ginkgo.By("Creating a mysql StatefulSet") - e2estatefulset.CreateStatefulSet(f.ClientSet, mysqlManifestPath, ns) + e2estatefulset.CreateStatefulSet(ctx, f.ClientSet, mysqlManifestPath, ns) ginkgo.By("Creating a mysql-test-server deployment") mysqlKubectlCreate(ns, "tester.yaml") ginkgo.By("Getting the ingress IPs from the test-service") - err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { - if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" { + err := wait.PollImmediateWithContext(ctx, statefulsetPoll, statefulsetTimeout, func(ctx context.Context) (bool, error) { + if t.ip = t.getServiceIP(ctx, f, ns, "test-server"); t.ip == "" { return false, nil } if _, err := t.countNames(); err != nil { @@ -130,7 +130,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) { // Test continually polls the db using the read and write connections, inserting data, and checking // that all the data is readable. -func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *MySQLUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { var writeSuccess, readSuccess, writeFailure, readFailure int ginkgo.By("Continuously polling the database during upgrade.") go wait.Until(func() { @@ -174,7 +174,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up } // Teardown performs one final check of the data's availability. -func (t *MySQLUpgradeTest) Teardown(f *framework.Framework) { +func (t *MySQLUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { count, err := t.countNames() framework.ExpectNoError(err) gomega.Expect(count).To(gomega.BeNumerically(">=", t.successfulWrites), "count is too small") diff --git a/test/e2e/upgrades/apps/replicasets.go b/test/e2e/upgrades/apps/replicasets.go index c20e6089f7d..8a7a3c6fbcd 100644 --- a/test/e2e/upgrades/apps/replicasets.go +++ b/test/e2e/upgrades/apps/replicasets.go @@ -49,24 +49,24 @@ type ReplicaSetUpgradeTest struct { func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" } // Setup creates a ReplicaSet and makes sure it's replicas ready. 
-func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) { +func (r *ReplicaSetUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { c := f.ClientSet ns := f.Namespace.Name nginxImage := imageutils.GetE2EImage(imageutils.Nginx) ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns)) replicaSet := newReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage) - rs, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), replicaSet, metav1.CreateOptions{}) + rs, err := c.AppsV1().ReplicaSets(ns).Create(ctx, replicaSet, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName)) - framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)) + framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(ctx, c, ns, rsName)) r.UID = rs.UID } // Test checks whether the replicasets are the same after an upgrade. -func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (r *ReplicaSetUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { c := f.ClientSet ns := f.Namespace.Name rsClient := c.AppsV1().ReplicaSets(ns) @@ -77,14 +77,14 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{ // Verify the RS is the same (survives) after the upgrade ginkgo.By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName)) - upgradedRS, err := rsClient.Get(context.TODO(), rsName, metav1.GetOptions{}) + upgradedRS, err := rsClient.Get(ctx, rsName, metav1.GetOptions{}) framework.ExpectNoError(err) if upgradedRS.UID != r.UID { framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID)) } ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName)) - framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)) + framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(ctx, c, ns, rsName)) // Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum)) @@ -94,11 +94,11 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{ framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName)) - framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)) + framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(ctx, c, ns, rsName)) } // Teardown cleans up any remaining resources. -func (r *ReplicaSetUpgradeTest) Teardown(f *framework.Framework) { +func (r *ReplicaSetUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go index ae0c0105763..85e0a23fb47 100644 --- a/test/e2e/upgrades/apps/statefulset.go +++ b/test/e2e/upgrades/apps/statefulset.go @@ -70,7 +70,7 @@ func (StatefulSetUpgradeTest) Skip(upgCtx upgrades.UpgradeContext) bool { } // Setup creates a StatefulSet and a HeadlessService. 
It verifies the basic SatefulSet properties -func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { +func (t *StatefulSetUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { ssName := "ss" labels := map[string]string{ "foo": "bar", @@ -86,49 +86,49 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { e2estatefulset.PauseNewPods(t.set) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) - _, err := f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), t.service, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().Services(ns).Create(ctx, t.service, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(t.set.Spec.Replicas) = 3 - _, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(context.TODO(), t.set, metav1.CreateOptions{}) + _, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(ctx, t.set, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + t.set.Name) - e2estatefulset.Saturate(f.ClientSet, t.set) - t.verify(f) - t.restart(f) - t.verify(f) + e2estatefulset.Saturate(ctx, f.ClientSet, t.set) + t.verify(ctx, f) + t.restart(ctx, f) + t.verify(ctx, f) } // Test waits for the upgrade to complete and verifies the StatefulSet basic functionality -func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *StatefulSetUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done - t.verify(f) + t.verify(ctx, f) } // Teardown deletes all StatefulSets -func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) { - e2estatefulset.DeleteAllStatefulSets(f.ClientSet, t.set.Name) +func (t *StatefulSetUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { + e2estatefulset.DeleteAllStatefulSets(ctx, f.ClientSet, t.set.Name) } -func (t *StatefulSetUpgradeTest) verify(f *framework.Framework) { +func (t *StatefulSetUpgradeTest) verify(ctx context.Context, f *framework.Framework) { ginkgo.By("Verifying statefulset mounted data directory is usable") - framework.ExpectNoError(e2estatefulset.CheckMount(f.ClientSet, t.set, "/data")) + framework.ExpectNoError(e2estatefulset.CheckMount(ctx, f.ClientSet, t.set, "/data")) ginkgo.By("Verifying statefulset provides a stable hostname for each pod") - framework.ExpectNoError(e2estatefulset.CheckHostname(f.ClientSet, t.set)) + framework.ExpectNoError(e2estatefulset.CheckHostname(ctx, f.ClientSet, t.set)) ginkgo.By("Verifying statefulset set proper service name") framework.ExpectNoError(e2estatefulset.CheckServiceName(t.set, t.set.Spec.ServiceName)) cmd := "echo $(hostname) > /data/hostname; sync;" ginkgo.By("Running " + cmd + " in all stateful pods") - framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(f.ClientSet, t.set, cmd)) + framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(ctx, f.ClientSet, t.set, cmd)) } -func (t *StatefulSetUpgradeTest) restart(f *framework.Framework) { +func (t *StatefulSetUpgradeTest) restart(ctx context.Context, f *framework.Framework) { ginkgo.By("Restarting statefulset " + t.set.Name) - e2estatefulset.Restart(f.ClientSet, t.set) - e2estatefulset.WaitForRunningAndReady(f.ClientSet, *t.set.Spec.Replicas, t.set) + e2estatefulset.Restart(ctx, f.ClientSet, t.set) + e2estatefulset.WaitForRunningAndReady(ctx, f.ClientSet, *t.set.Spec.Replicas, t.set) } diff --git 
a/test/e2e/upgrades/auth/serviceaccount_admission_controller_migration.go b/test/e2e/upgrades/auth/serviceaccount_admission_controller_migration.go index f157d333325..798097f403b 100644 --- a/test/e2e/upgrades/auth/serviceaccount_admission_controller_migration.go +++ b/test/e2e/upgrades/auth/serviceaccount_admission_controller_migration.go @@ -49,20 +49,20 @@ func (ServiceAccountAdmissionControllerMigrationTest) Name() string { } // Setup creates pod-before-migration which has legacy service account token. -func (t *ServiceAccountAdmissionControllerMigrationTest) Setup(f *framework.Framework) { - t.pod = createPod(f, podBeforeMigrationName) - inClusterClientMustWork(f, t.pod) +func (t *ServiceAccountAdmissionControllerMigrationTest) Setup(ctx context.Context, f *framework.Framework) { + t.pod = createPod(ctx, f, podBeforeMigrationName) + inClusterClientMustWork(ctx, f, t.pod) } // Test waits for the upgrade to complete, and then verifies pod-before-migration // and pod-after-migration are able to make requests using in cluster config. -func (t *ServiceAccountAdmissionControllerMigrationTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *ServiceAccountAdmissionControllerMigrationTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { ginkgo.By("Waiting for upgrade to finish") <-done ginkgo.By("Starting post-upgrade check") ginkgo.By("Checking pod-before-migration makes successful requests using in cluster config") - podBeforeMigration, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podBeforeMigrationName, metav1.GetOptions{}) + podBeforeMigration, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, podBeforeMigrationName, metav1.GetOptions{}) framework.ExpectNoError(err) if podBeforeMigration.GetUID() != t.pod.GetUID() { framework.Failf("Pod %q GetUID() = %q, want %q.", podBeforeMigration.Name, podBeforeMigration.GetUID(), t.pod.GetUID()) @@ -70,29 +70,29 @@ func (t *ServiceAccountAdmissionControllerMigrationTest) Test(f *framework.Frame if podBeforeMigration.Status.ContainerStatuses[0].RestartCount != 0 { framework.Failf("Pod %q RestartCount = %d, want 0.", podBeforeMigration.Name, podBeforeMigration.Status.ContainerStatuses[0].RestartCount) } - inClusterClientMustWork(f, podBeforeMigration) + inClusterClientMustWork(ctx, f, podBeforeMigration) ginkgo.By("Checking pod-after-migration makes successful requests using in cluster config") - podAfterMigration := createPod(f, podAfterMigrationName) + podAfterMigration := createPod(ctx, f, podAfterMigrationName) if len(podAfterMigration.Spec.Volumes) != 1 || podAfterMigration.Spec.Volumes[0].Projected == nil { framework.Failf("Pod %q Volumes[0].Projected.Sources = nil, want non-nil.", podAfterMigration.Name) } - inClusterClientMustWork(f, podAfterMigration) + inClusterClientMustWork(ctx, f, podAfterMigration) ginkgo.By("Finishing post-upgrade check") } // Teardown cleans up any remaining resources. 
-func (t *ServiceAccountAdmissionControllerMigrationTest) Teardown(f *framework.Framework) { +func (t *ServiceAccountAdmissionControllerMigrationTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } -func inClusterClientMustWork(f *framework.Framework, pod *v1.Pod) { +func inClusterClientMustWork(ctx context.Context, f *framework.Framework, pod *v1.Pod) { var logs string since := time.Now() if err := wait.PollImmediate(15*time.Second, 5*time.Minute, func() (done bool, err error) { framework.Logf("Polling logs") - logs, err = e2epod.GetPodLogsSince(f.ClientSet, pod.Namespace, pod.Name, "inclusterclient", since) + logs, err = e2epod.GetPodLogsSince(ctx, f.ClientSet, pod.Namespace, pod.Name, "inclusterclient", since) if err != nil { framework.Logf("Error pulling logs: %v", err) return false, nil @@ -113,7 +113,7 @@ func inClusterClientMustWork(f *framework.Framework, pod *v1.Pod) { } // createPod creates a pod. -func createPod(f *framework.Framework, podName string) *v1.Pod { +func createPod(ctx context.Context, f *framework.Framework, podName string) *v1.Pod { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -129,11 +129,11 @@ func createPod(f *framework.Framework, podName string) *v1.Pod { }, } - createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Created pod %s", podName) - if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { + if !e2epod.CheckPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { framework.Failf("Pod %q/%q never became ready", createdPod.Namespace, createdPod.Name) } diff --git a/test/e2e/upgrades/autoscaling/horizontal_pod_autoscalers.go b/test/e2e/upgrades/autoscaling/horizontal_pod_autoscalers.go index f763a91a03e..ddac480002a 100644 --- a/test/e2e/upgrades/autoscaling/horizontal_pod_autoscalers.go +++ b/test/e2e/upgrades/autoscaling/horizontal_pod_autoscalers.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "fmt" "time" @@ -38,8 +39,8 @@ type HPAUpgradeTest struct { func (HPAUpgradeTest) Name() string { return "hpa-upgrade" } // Setup creates a resource consumer and an HPA object that autoscales the consumer. -func (t *HPAUpgradeTest) Setup(f *framework.Framework) { - t.rc = e2eautoscaling.NewDynamicResourceConsumer( +func (t *HPAUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { + t.rc = e2eautoscaling.NewDynamicResourceConsumer(ctx, "res-cons-upgrade", f.Namespace.Name, e2eautoscaling.KindRC, @@ -53,49 +54,49 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) { f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) - t.hpa = e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler( + t.hpa = e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(ctx, t.rc, 20, /* targetCPUUtilizationPercent */ 1, /* minPods */ 5) /* maxPods */ t.rc.Pause() - t.test() + t.test(ctx) } // Test waits for upgrade to complete and verifies if HPA works correctly. 
-func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *HPAUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { // Block until upgrade is done ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA")) <-done - t.test() + t.test(ctx) } // Teardown cleans up any remaining resources. -func (t *HPAUpgradeTest) Teardown(f *framework.Framework) { +func (t *HPAUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything - e2eautoscaling.DeleteHorizontalPodAutoscaler(t.rc, t.hpa.Name) - t.rc.CleanUp() + e2eautoscaling.DeleteHorizontalPodAutoscaler(ctx, t.rc, t.hpa.Name) + t.rc.CleanUp(ctx) } -func (t *HPAUpgradeTest) test() { +func (t *HPAUpgradeTest) test(ctx context.Context) { const timeToWait = 15 * time.Minute - t.rc.Resume() + t.rc.Resume(ctx) ginkgo.By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1.")) t.rc.ConsumeCPU(10) /* millicores */ ginkgo.By(fmt.Sprintf("HPA waits for 1 replica")) - t.rc.WaitForReplicas(1, timeToWait) + t.rc.WaitForReplicas(ctx, 1, timeToWait) ginkgo.By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores.")) t.rc.ConsumeCPU(250) /* millicores */ ginkgo.By(fmt.Sprintf("HPA waits for 3 replicas")) - t.rc.WaitForReplicas(3, timeToWait) + t.rc.WaitForReplicas(ctx, 3, timeToWait) ginkgo.By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5.")) t.rc.ConsumeCPU(700) /* millicores */ ginkgo.By(fmt.Sprintf("HPA waits for 5 replicas")) - t.rc.WaitForReplicas(5, timeToWait) + t.rc.WaitForReplicas(ctx, 5, timeToWait) // We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail. t.rc.Pause() diff --git a/test/e2e/upgrades/network/kube_proxy_migration.go b/test/e2e/upgrades/network/kube_proxy_migration.go index 9e3f1579f50..a5c6e970da1 100644 --- a/test/e2e/upgrades/network/kube_proxy_migration.go +++ b/test/e2e/upgrades/network/kube_proxy_migration.go @@ -50,14 +50,14 @@ type KubeProxyUpgradeTest struct { func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" } // Setup verifies kube-proxy static pods is running before upgrade. -func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) { +func (t *KubeProxyUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { ginkgo.By("Waiting for kube-proxy static pods running and ready") - err := waitForKubeProxyStaticPodsRunning(f.ClientSet) + err := waitForKubeProxyStaticPodsRunning(ctx, f.ClientSet) framework.ExpectNoError(err) } // Test validates if kube-proxy is migrated from static pods to DaemonSet. -func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *KubeProxyUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { c := f.ClientSet // Block until upgrade is done. 
@@ -65,16 +65,16 @@ func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{} <-done ginkgo.By("Waiting for kube-proxy static pods disappear") - err := waitForKubeProxyStaticPodsDisappear(c) + err := waitForKubeProxyStaticPodsDisappear(ctx, c) framework.ExpectNoError(err) ginkgo.By("Waiting for kube-proxy DaemonSet running and ready") - err = waitForKubeProxyDaemonSetRunning(f, c) + err = waitForKubeProxyDaemonSetRunning(ctx, f, c) framework.ExpectNoError(err) } // Teardown does nothing. -func (t *KubeProxyUpgradeTest) Teardown(f *framework.Framework) { +func (t *KubeProxyUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { } // KubeProxyDowngradeTest tests kube-proxy DaemonSet -> static pods downgrade path. @@ -85,14 +85,14 @@ type KubeProxyDowngradeTest struct { func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" } // Setup verifies kube-proxy DaemonSet is running before upgrade. -func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) { +func (t *KubeProxyDowngradeTest) Setup(ctx context.Context, f *framework.Framework) { ginkgo.By("Waiting for kube-proxy DaemonSet running and ready") - err := waitForKubeProxyDaemonSetRunning(f, f.ClientSet) + err := waitForKubeProxyDaemonSetRunning(ctx, f, f.ClientSet) framework.ExpectNoError(err) } // Test validates if kube-proxy is migrated from DaemonSet to static pods. -func (t *KubeProxyDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *KubeProxyDowngradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { c := f.ClientSet // Block until upgrade is done. @@ -100,29 +100,29 @@ func (t *KubeProxyDowngradeTest) Test(f *framework.Framework, done <-chan struct <-done ginkgo.By("Waiting for kube-proxy DaemonSet disappear") - err := waitForKubeProxyDaemonSetDisappear(c) + err := waitForKubeProxyDaemonSetDisappear(ctx, c) framework.ExpectNoError(err) ginkgo.By("Waiting for kube-proxy static pods running and ready") - err = waitForKubeProxyStaticPodsRunning(c) + err = waitForKubeProxyStaticPodsRunning(ctx, c) framework.ExpectNoError(err) } // Teardown does nothing. 
-func (t *KubeProxyDowngradeTest) Teardown(f *framework.Framework) { +func (t *KubeProxyDowngradeTest) Teardown(ctx context.Context, f *framework.Framework) { } -func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error { +func waitForKubeProxyStaticPodsRunning(ctx context.Context, c clientset.Interface) error { framework.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout) condition := func() (bool, error) { - pods, err := getKubeProxyStaticPods(c) + pods, err := getKubeProxyStaticPods(ctx, c) if err != nil { framework.Logf("Failed to get kube-proxy static pods: %v", err) return false, nil } - nodes, err := e2enode.GetReadySchedulableNodes(c) + nodes, err := e2enode.GetReadySchedulableNodes(ctx, c) if err != nil { framework.Logf("Failed to get nodes: %v", err) return false, nil @@ -148,11 +148,11 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error { return nil } -func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error { +func waitForKubeProxyStaticPodsDisappear(ctx context.Context, c clientset.Interface) error { framework.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout) condition := func() (bool, error) { - pods, err := getKubeProxyStaticPods(c) + pods, err := getKubeProxyStaticPods(ctx, c) if err != nil { framework.Logf("Failed to get kube-proxy static pods: %v", err) return false, nil @@ -171,11 +171,11 @@ func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error { return nil } -func waitForKubeProxyDaemonSetRunning(f *framework.Framework, c clientset.Interface) error { +func waitForKubeProxyDaemonSetRunning(ctx context.Context, f *framework.Framework, c clientset.Interface) error { framework.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout) condition := func() (bool, error) { - daemonSets, err := getKubeProxyDaemonSet(c) + daemonSets, err := getKubeProxyDaemonSet(ctx, c) if err != nil { framework.Logf("Failed to get kube-proxy DaemonSet: %v", err) return false, nil @@ -186,7 +186,7 @@ func waitForKubeProxyDaemonSetRunning(f *framework.Framework, c clientset.Interf return false, nil } - return e2edaemonset.CheckRunningOnAllNodes(f, &daemonSets.Items[0]) + return e2edaemonset.CheckRunningOnAllNodes(ctx, f, &daemonSets.Items[0]) } if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil { @@ -195,11 +195,11 @@ func waitForKubeProxyDaemonSetRunning(f *framework.Framework, c clientset.Interf return nil } -func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error { +func waitForKubeProxyDaemonSetDisappear(ctx context.Context, c clientset.Interface) error { framework.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout) condition := func() (bool, error) { - daemonSets, err := getKubeProxyDaemonSet(c) + daemonSets, err := getKubeProxyDaemonSet(ctx, c) if err != nil { framework.Logf("Failed to get kube-proxy DaemonSet: %v", err) return false, nil @@ -218,14 +218,14 @@ func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error { return nil } -func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) { +func getKubeProxyStaticPods(ctx context.Context, c clientset.Interface) (*v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{clusterComponentKey: kubeProxyLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - return c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts) + return 
c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, listOpts) } -func getKubeProxyDaemonSet(c clientset.Interface) (*appsv1.DaemonSetList, error) { +func getKubeProxyDaemonSet(ctx context.Context, c clientset.Interface) (*appsv1.DaemonSetList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{clusterAddonLabelKey: kubeProxyLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - return c.AppsV1().DaemonSets(metav1.NamespaceSystem).List(context.TODO(), listOpts) + return c.AppsV1().DaemonSets(metav1.NamespaceSystem).List(ctx, listOpts) } diff --git a/test/e2e/upgrades/network/services.go b/test/e2e/upgrades/network/services.go index f41ad7ee014..44036c58a57 100644 --- a/test/e2e/upgrades/network/services.go +++ b/test/e2e/upgrades/network/services.go @@ -17,6 +17,8 @@ limitations under the License. package network import ( + "context" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" @@ -42,7 +44,7 @@ func (ServiceUpgradeTest) Name() string { return "service-upgrade" } func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") } // Setup creates a service with a load balancer and makes sure it's reachable. -func (t *ServiceUpgradeTest) Setup(f *framework.Framework) { +func (t *ServiceUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { serviceName := "service-test" jig := e2eservice.NewTestJig(f.ClientSet, f.Namespace.Name, serviceName) @@ -50,11 +52,11 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) { cs := f.ClientSet ginkgo.By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name) - _, err := jig.CreateTCPService(func(s *v1.Service) { + _, err := jig.CreateTCPService(ctx, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeLoadBalancer }) framework.ExpectNoError(err) - tcpService, err := jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) + tcpService, err := jig.WaitForLoadBalancer(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)) framework.ExpectNoError(err) // Get info to hit it with @@ -62,12 +64,12 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) { svcPort := int(tcpService.Spec.Ports[0].Port) ginkgo.By("creating pod to be part of service " + serviceName) - rc, err := jig.Run(jig.AddRCAntiAffinity) + rc, err := jig.Run(ctx, jig.AddRCAntiAffinity) framework.ExpectNoError(err) if shouldTestPDBs() { ginkgo.By("creating a PodDisruptionBudget to cover the ReplicationController") - _, err = jig.CreatePDB(rc) + _, err = jig.CreatePDB(ctx, rc) framework.ExpectNoError(err) } @@ -77,7 +79,7 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) { if framework.ProviderIs("aws") { timeout = e2eservice.LoadBalancerLagTimeoutAWS } - e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, timeout) + e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, timeout) t.jig = jig t.tcpService = tcpService @@ -86,29 +88,30 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) { } // Test runs a connectivity check to the service. 
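Not every wait loop was switched to the WithContext variant: in the kube-proxy migration waiters a few hunks above, the loop stays on the context-free wait.PollImmediate and the condition closure simply captures ctx from the enclosing function, which is enough to make the API calls cancellable. A stripped-down illustration of that capture pattern (the waiter name and label selector are hypothetical):

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForLabelledPodsGone shows the closure-capture variant: ctx is not a
// parameter of the condition, it is closed over, while the poll loop itself
// still runs on a plain interval/timeout.
func waitForLabelledPodsGone(ctx context.Context, c clientset.Interface, labelSelector string) error {
	condition := func() (bool, error) {
		pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
		if err != nil {
			return false, nil // retry on transient errors
		}
		return len(pods.Items) == 0, nil
	}
	return wait.PollImmediate(5*time.Second, 5*time.Minute, condition)
}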
-func (t *ServiceUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *ServiceUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { switch upgrade { case upgrades.MasterUpgrade, upgrades.ClusterUpgrade: - t.test(f, done, true, true) + t.test(ctx, f, done, true, true) case upgrades.NodeUpgrade: // Node upgrades should test during disruption only on GCE/GKE for now. - t.test(f, done, shouldTestPDBs(), false) + t.test(ctx, f, done, shouldTestPDBs(), false) default: - t.test(f, done, false, false) + t.test(ctx, f, done, false, false) } } // Teardown cleans up any remaining resources. -func (t *ServiceUpgradeTest) Teardown(f *framework.Framework) { +func (t *ServiceUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } -func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption, testFinalizer bool) { +func (t *ServiceUpgradeTest) test(ctx context.Context, f *framework.Framework, done <-chan struct{}, testDuringDisruption, testFinalizer bool) { if testDuringDisruption { // Continuous validation ginkgo.By("continuously hitting the pod through the service's LoadBalancer") + // TODO (pohly): add context support wait.Until(func() { - e2eservice.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault) + e2eservice.TestReachableHTTP(ctx, t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault) }, framework.Poll, done) } else { // Block until upgrade is done @@ -118,13 +121,13 @@ func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, // Hit it once more ginkgo.By("hitting the pod through the service's LoadBalancer") - e2eservice.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault) + e2eservice.TestReachableHTTP(ctx, t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault) if testFinalizer { defer func() { ginkgo.By("Check that service can be deleted with finalizer") - e2eservice.WaitForServiceDeletedWithFinalizer(t.jig.Client, t.tcpService.Namespace, t.tcpService.Name) + e2eservice.WaitForServiceDeletedWithFinalizer(ctx, t.jig.Client, t.tcpService.Namespace, t.tcpService.Name) }() ginkgo.By("Check that finalizer is present on loadBalancer type service") - e2eservice.WaitForServiceUpdatedWithFinalizer(t.jig.Client, t.tcpService.Namespace, t.tcpService.Name, true) + e2eservice.WaitForServiceUpdatedWithFinalizer(ctx, t.jig.Client, t.tcpService.Namespace, t.tcpService.Name, true) } } diff --git a/test/e2e/upgrades/node/apparmor.go b/test/e2e/upgrades/node/apparmor.go index e8ca5093e8f..967fcd74228 100644 --- a/test/e2e/upgrades/node/apparmor.go +++ b/test/e2e/upgrades/node/apparmor.go @@ -57,54 +57,54 @@ func (AppArmorUpgradeTest) Skip(upgCtx upgrades.UpgradeContext) bool { } // Setup creates a secret and then verifies that a pod can consume it. -func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) { +func (t *AppArmorUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { ginkgo.By("Loading AppArmor profiles to nodes") - e2esecurity.LoadAppArmorProfiles(f.Namespace.Name, f.ClientSet) + e2esecurity.LoadAppArmorProfiles(ctx, f.Namespace.Name, f.ClientSet) // Create the initial test pod. 
ginkgo.By("Creating a long-running AppArmor enabled pod.") - t.pod = e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, false) + t.pod = e2esecurity.CreateAppArmorTestPod(ctx, f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, false) // Verify initial state. - t.verifyNodesAppArmorEnabled(f) - t.verifyNewPodSucceeds(f) + t.verifyNodesAppArmorEnabled(ctx, f) + t.verifyNewPodSucceeds(ctx, f) } // Test waits for the upgrade to complete, and then verifies that a // pod can still consume the secret. -func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *AppArmorUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done if upgrade == upgrades.MasterUpgrade { - t.verifyPodStillUp(f) + t.verifyPodStillUp(ctx, f) } - t.verifyNodesAppArmorEnabled(f) - t.verifyNewPodSucceeds(f) + t.verifyNodesAppArmorEnabled(ctx, f) + t.verifyNewPodSucceeds(ctx, f) } // Teardown cleans up any remaining resources. -func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) { +func (t *AppArmorUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything ginkgo.By("Logging container failures") - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) } -func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) { +func (t *AppArmorUpgradeTest) verifyPodStillUp(ctx context.Context, f *framework.Framework) { ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod") - pod, err := e2epod.NewPodClient(f).Get(context.TODO(), t.pod.Name, metav1.GetOptions{}) + pod, err := e2epod.NewPodClient(f).Get(ctx, t.pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Should be able to get pod") framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running") gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running") gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted") } -func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) { +func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(ctx context.Context, f *framework.Framework) { ginkgo.By("Verifying an AppArmor profile is enforced for a new pod") - e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true) + e2esecurity.CreateAppArmorTestPod(ctx, f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true) } -func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) { +func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(ctx context.Context, f *framework.Framework) { ginkgo.By("Verifying nodes are AppArmor enabled") - nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list nodes") for _, node := range nodes.Items { gomega.Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{ diff --git a/test/e2e/upgrades/node/configmaps.go b/test/e2e/upgrades/node/configmaps.go index 02d7ff6d576..075f4319377 100644 --- a/test/e2e/upgrades/node/configmaps.go +++ 
b/test/e2e/upgrades/node/configmaps.go @@ -43,7 +43,7 @@ func (ConfigMapUpgradeTest) Name() string { } // Setup creates a ConfigMap and then verifies that a pod can consume it. -func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) { +func (t *ConfigMapUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { configMapName := "upgrade-configmap" ns := f.Namespace @@ -60,30 +60,30 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a ConfigMap") var err error - if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), t.configMap, metav1.CreateOptions{}); err != nil { + if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(ctx, t.configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err) } ginkgo.By("Making sure the ConfigMap is consumable") - t.testPod(f) + t.testPod(ctx, f) } // Test waits for the upgrade to complete, and then verifies that a // pod can still consume the ConfigMap. -func (t *ConfigMapUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *ConfigMapUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done ginkgo.By("Consuming the ConfigMap after upgrade") - t.testPod(f) + t.testPod(ctx, f) } // Teardown cleans up any remaining resources. -func (t *ConfigMapUpgradeTest) Teardown(f *framework.Framework) { +func (t *ConfigMapUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } // testPod creates a pod that consumes a ConfigMap and prints it out. The // output is then verified. -func (t *ConfigMapUpgradeTest) testPod(f *framework.Framework) { +func (t *ConfigMapUpgradeTest) testPod(ctx context.Context, f *framework.Framework) { volumeName := "configmap-volume" volumeMountPath := "/etc/configmap-volume" @@ -148,8 +148,8 @@ func (t *ConfigMapUpgradeTest) testPod(f *framework.Framework) { "content of file \"/etc/configmap-volume/data\": some configmap data", "mode of file \"/etc/configmap-volume/data\": -rw-r--r--", } - e2eoutput.TestContainerOutput(f, "volume consume configmap", pod, 0, expectedOutput) + e2eoutput.TestContainerOutput(ctx, f, "volume consume configmap", pod, 0, expectedOutput) expectedOutput = []string{"CONFIGMAP_DATA=some configmap data"} - e2eoutput.TestContainerOutput(f, "env consume configmap", pod, 1, expectedOutput) + e2eoutput.TestContainerOutput(ctx, f, "env consume configmap", pod, 1, expectedOutput) } diff --git a/test/e2e/upgrades/node/nvidia-gpu.go b/test/e2e/upgrades/node/nvidia-gpu.go index 3331d70bc19..e02a64e8a72 100644 --- a/test/e2e/upgrades/node/nvidia-gpu.go +++ b/test/e2e/upgrades/node/nvidia-gpu.go @@ -17,6 +17,8 @@ limitations under the License. package node import ( + "context" + "k8s.io/kubernetes/test/e2e/framework" e2ejob "k8s.io/kubernetes/test/e2e/framework/job" "k8s.io/kubernetes/test/e2e/scheduling" @@ -38,27 +40,27 @@ type NvidiaGPUUpgradeTest struct { func (NvidiaGPUUpgradeTest) Name() string { return "nvidia-gpu-upgrade [sig-node] [sig-scheduling]" } // Setup creates a job requesting gpu. 
-func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) { - scheduling.SetupNVIDIAGPUNode(f, false) +func (t *NvidiaGPUUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { + scheduling.SetupNVIDIAGPUNode(ctx, f, false) ginkgo.By("Creating a job requesting gpu") - scheduling.StartJob(f, completions) + scheduling.StartJob(ctx, f, completions) } // Test waits for the upgrade to complete, and then verifies that the // cuda pod started by the gpu job can successfully finish. -func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *NvidiaGPUUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done ginkgo.By("Verifying gpu job success") - scheduling.VerifyJobNCompletions(f, completions) + scheduling.VerifyJobNCompletions(ctx, f, completions) if upgrade == upgrades.MasterUpgrade || upgrade == upgrades.ClusterUpgrade { // MasterUpgrade should be totally hitless. - job, err := e2ejob.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add") + job, err := e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, "cuda-add") framework.ExpectNoError(err) framework.ExpectEqual(job.Status.Failed, 0, "Job pods failed during master upgrade: %v", job.Status.Failed) } } // Teardown cleans up any remaining resources. -func (t *NvidiaGPUUpgradeTest) Teardown(f *framework.Framework) { +func (t *NvidiaGPUUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } diff --git a/test/e2e/upgrades/node/secrets.go b/test/e2e/upgrades/node/secrets.go index 5f3d185cb2c..2257ecff69c 100644 --- a/test/e2e/upgrades/node/secrets.go +++ b/test/e2e/upgrades/node/secrets.go @@ -41,7 +41,7 @@ type SecretUpgradeTest struct { func (SecretUpgradeTest) Name() string { return "[sig-storage] [sig-api-machinery] secret-upgrade" } // Setup creates a secret and then verifies that a pod can consume it. -func (t *SecretUpgradeTest) Setup(f *framework.Framework) { +func (t *SecretUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { secretName := "upgrade-secret" ns := f.Namespace @@ -58,30 +58,30 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a secret") var err error - if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), t.secret, metav1.CreateOptions{}); err != nil { + if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(ctx, t.secret, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test secret %s: %v", t.secret.Name, err) } ginkgo.By("Making sure the secret is consumable") - t.testPod(f) + t.testPod(ctx, f) } // Test waits for the upgrade to complete, and then verifies that a // pod can still consume the secret. -func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *SecretUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done ginkgo.By("Consuming the secret after upgrade") - t.testPod(f) + t.testPod(ctx, f) } // Teardown cleans up any remaining resources. -func (t *SecretUpgradeTest) Teardown(f *framework.Framework) { +func (t *SecretUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } // testPod creates a pod that consumes a secret and prints it out. The // output is then verified. 
-func (t *SecretUpgradeTest) testPod(f *framework.Framework) { +func (t *SecretUpgradeTest) testPod(ctx context.Context, f *framework.Framework) { volumeName := "secret-volume" volumeMountPath := "/etc/secret-volume" @@ -145,8 +145,8 @@ func (t *SecretUpgradeTest) testPod(f *framework.Framework) { "mode of file \"/etc/secret-volume/data\": -rw-r--r--", } - e2eoutput.TestContainerOutput(f, "volume consume secrets", pod, 0, expectedOutput) + e2eoutput.TestContainerOutput(ctx, f, "volume consume secrets", pod, 0, expectedOutput) expectedOutput = []string{"SECRET_DATA=keep it secret"} - e2eoutput.TestContainerOutput(f, "env consume secrets", pod, 1, expectedOutput) + e2eoutput.TestContainerOutput(ctx, f, "env consume secrets", pod, 1, expectedOutput) } diff --git a/test/e2e/upgrades/node/sysctl.go b/test/e2e/upgrades/node/sysctl.go index f58bbfdd169..b4526f78f2d 100644 --- a/test/e2e/upgrades/node/sysctl.go +++ b/test/e2e/upgrades/node/sysctl.go @@ -45,25 +45,25 @@ type SysctlUpgradeTest struct { // Setup creates two pods: one with safe sysctls, one with unsafe sysctls. It checks that the former // launched and the later is rejected. -func (t *SysctlUpgradeTest) Setup(f *framework.Framework) { - t.validPod = t.verifySafeSysctlWork(f) - t.invalidPod = t.verifyUnsafeSysctlsAreRejected(f) +func (t *SysctlUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { + t.validPod = t.verifySafeSysctlWork(ctx, f) + t.invalidPod = t.verifyUnsafeSysctlsAreRejected(ctx, f) } // Test waits for the upgrade to complete, and then verifies that a // pod can still consume the ConfigMap. -func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *SysctlUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done switch upgrade { case upgrades.MasterUpgrade, upgrades.ClusterUpgrade: ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade") - pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(context.TODO(), t.validPod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(ctx, t.validPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(pod.Status.Phase, v1.PodRunning) } ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade") - pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(context.TODO(), t.invalidPod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(ctx, t.invalidPod.Name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } @@ -71,39 +71,39 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u framework.ExpectNotEqual(pod.Status.Phase, v1.PodRunning) } - t.verifySafeSysctlWork(f) - t.verifyUnsafeSysctlsAreRejected(f) + t.verifySafeSysctlWork(ctx, f) + t.verifyUnsafeSysctlsAreRejected(ctx, f) } // Teardown cleans up any remaining resources. 
-func (t *SysctlUpgradeTest) Teardown(f *framework.Framework) { +func (t *SysctlUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { // rely on the namespace deletion to clean up everything } -func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod { +func (t *SysctlUpgradeTest) verifySafeSysctlWork(ctx context.Context, f *framework.Framework) *v1.Pod { ginkgo.By("Creating a pod with safe sysctls") safeSysctl := "net.ipv4.ip_local_port_range" safeSysctlValue := "1024 1042" sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue}) - validPod := e2epod.NewPodClient(f).Create(t.validPod) + validPod := e2epod.NewPodClient(f).Create(ctx, t.validPod) ginkgo.By("Making sure the valid pod launches") - _, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(t.validPod) + _, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, t.validPod) framework.ExpectNoError(err) - e2eoutput.TestContainerOutput(f, "pod with safe sysctl launched", t.validPod, 0, []string{fmt.Sprintf("%s = %s", safeSysctl, safeSysctlValue)}) + e2eoutput.TestContainerOutput(ctx, f, "pod with safe sysctl launched", t.validPod, 0, []string{fmt.Sprintf("%s = %s", safeSysctl, safeSysctlValue)}) return validPod } -func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framework) *v1.Pod { +func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(ctx context.Context, f *framework.Framework) *v1.Pod { ginkgo.By("Creating a pod with unsafe sysctls") invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{ "fs.mount-max": "1000000", }) - invalidPod = e2epod.NewPodClient(f).Create(invalidPod) + invalidPod = e2epod.NewPodClient(f).Create(ctx, invalidPod) ginkgo.By("Making sure the invalid pod failed") - ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(invalidPod) + ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, invalidPod) framework.ExpectNoError(err) framework.ExpectEqual(ev.Reason, sysctl.ForbiddenReason) diff --git a/test/e2e/upgrades/storage/persistent_volumes.go b/test/e2e/upgrades/storage/persistent_volumes.go index 9f60c3097ff..ca8d39e5001 100644 --- a/test/e2e/upgrades/storage/persistent_volumes.go +++ b/test/e2e/upgrades/storage/persistent_volumes.go @@ -17,6 +17,8 @@ limitations under the License. package storage import ( + "context" + v1 "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/test/e2e/framework" @@ -45,7 +47,7 @@ const ( ) // Setup creates a pv and then verifies that a pod can consume it. The pod writes data to the volume. -func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) { +func (t *PersistentVolumeUpgradeTest) Setup(ctx context.Context, f *framework.Framework) { var err error e2eskipper.SkipUnlessProviderIs("gce", "gke", "openstack", "aws", "vsphere", "azure") @@ -57,32 +59,32 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) { StorageClassName: nil, } t.pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns) - t.pvc, err = e2epv.CreatePVC(f.ClientSet, ns, t.pvc) + t.pvc, err = e2epv.CreatePVC(ctx, f.ClientSet, ns, t.pvc) framework.ExpectNoError(err) ginkgo.By("Consuming the PV before upgrade") - t.testPod(f, pvWriteCmd+";"+pvReadCmd) + t.testPod(ctx, f, pvWriteCmd+";"+pvReadCmd) } // Test waits for the upgrade to complete, and then verifies that a pod can still consume the pv // and that the volume data persists. 
-func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *PersistentVolumeUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { <-done ginkgo.By("Consuming the PV after upgrade") - t.testPod(f, pvReadCmd) + t.testPod(ctx, f, pvReadCmd) } // Teardown cleans up any remaining resources. -func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) { - errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, nil, t.pvc) +func (t *PersistentVolumeUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) { + errs := e2epv.PVPVCCleanup(ctx, f.ClientSet, f.Namespace.Name, nil, t.pvc) if len(errs) > 0 { framework.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs)) } } // testPod creates a pod that consumes a pv and prints it out. The output is then verified. -func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) { +func (t *PersistentVolumeUpgradeTest) testPod(ctx context.Context, f *framework.Framework, cmd string) { pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd) expectedOutput := []string{pvTestData} - e2eoutput.TestContainerOutput(f, "pod consumes pv", pod, 0, expectedOutput) + e2eoutput.TestContainerOutput(ctx, f, "pod consumes pv", pod, 0, expectedOutput) } diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go index 4cde7f6f0a3..8fe978740e4 100644 --- a/test/e2e/upgrades/storage/volume_mode.go +++ b/test/e2e/upgrades/storage/volume_mode.go @@ -69,7 +69,7 @@ func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool { } // Setup creates a block pv and then verifies that a pod can consume it. The pod writes data to the volume. 
-func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) { +func (t *VolumeModeDowngradeTest) Setup(ctx context.Context, f *framework.Framework) { var err error @@ -83,16 +83,16 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) { VolumeMode: &block, } t.pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns) - t.pvc, err = e2epv.CreatePVC(cs, ns, t.pvc) + t.pvc, err = e2epv.CreatePVC(ctx, cs, ns, t.pvc) framework.ExpectNoError(err) - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(context.TODO(), t.pvc.Name, metav1.GetOptions{}) + t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(ctx, t.pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - t.pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), t.pvc.Spec.VolumeName, metav1.GetOptions{}) + t.pv, err = cs.CoreV1().PersistentVolumes().Get(ctx, t.pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Consuming the PVC before downgrade") @@ -101,7 +101,7 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) { PVCs: []*v1.PersistentVolumeClaim{t.pvc}, SeLinuxLabel: e2epv.SELinuxLabel, } - t.pod, err = e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout) + t.pod, err = e2epod.CreateSecPod(ctx, cs, &podConfig, framework.PodStartTimeout) framework.ExpectNoError(err) ginkgo.By("Checking if PV exists as expected volume mode") @@ -113,7 +113,7 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) { // Test waits for the downgrade to complete, and then verifies that a pod can no // longer consume the pv as it is not mapped nor mounted into the pod -func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { +func (t *VolumeModeDowngradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { ginkgo.By("Waiting for downgrade to finish") <-done @@ -122,13 +122,13 @@ func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struc } // Teardown cleans up any remaining resources. 
-func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) { +func (t *VolumeModeDowngradeTest) Teardown(ctx context.Context, f *framework.Framework) { ginkgo.By("Deleting the pod") - framework.ExpectNoError(e2epod.DeletePodWithWait(f.ClientSet, t.pod)) + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, t.pod)) ginkgo.By("Deleting the PVC") - framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(context.TODO(), t.pvc.Name, metav1.DeleteOptions{})) + framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(ctx, t.pvc.Name, metav1.DeleteOptions{})) ginkgo.By("Waiting for the PV to be deleted") - framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute)) + framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute)) } diff --git a/test/e2e/upgrades/upgrade.go b/test/e2e/upgrades/upgrade.go index 78f0fbae8b6..2ee59520cdf 100644 --- a/test/e2e/upgrades/upgrade.go +++ b/test/e2e/upgrades/upgrade.go @@ -19,6 +19,8 @@ limitations under the License. package upgrades import ( + "context" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" ) @@ -49,17 +51,17 @@ type Test interface { // Setup should create and verify whatever objects need to // exist before the upgrade disruption starts. - Setup(f *framework.Framework) + Setup(ctx context.Context, f *framework.Framework) // Test will run during the upgrade. When the upgrade is // complete, done will be closed and final validation can // begin. - Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) + Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) // Teardown should clean up any objects that are created that // aren't already cleaned up by the framework. This will // always be called, even if Setup failed. - Teardown(f *framework.Framework) + Teardown(ctx context.Context, f *framework.Framework) } // Skippable is an interface that an upgrade test can implement to be diff --git a/test/e2e/upgrades/upgrade_suite.go b/test/e2e/upgrades/upgrade_suite.go index a7379c34628..6692fbdcd08 100644 --- a/test/e2e/upgrades/upgrade_suite.go +++ b/test/e2e/upgrades/upgrade_suite.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" "encoding/xml" "fmt" "os" @@ -41,7 +42,7 @@ type chaosMonkeyAdapter struct { upgCtx UpgradeContext } -func (cma *chaosMonkeyAdapter) Test(sem *chaosmonkey.Semaphore) { +func (cma *chaosMonkeyAdapter) Test(ctx context.Context, sem *chaosmonkey.Semaphore) { var once sync.Once ready := func() { once.Do(func() { @@ -55,9 +56,9 @@ func (cma *chaosMonkeyAdapter) Test(sem *chaosmonkey.Semaphore) { } ginkgo.DeferCleanup(cma.test.Teardown, cma.framework) - cma.test.Setup(cma.framework) + cma.test.Setup(ctx, cma.framework) ready() - cma.test.Test(cma.framework, sem.StopCh, cma.upgradeType) + cma.test.Test(ctx, cma.framework, sem.StopCh, cma.upgradeType) } func CreateUpgradeFrameworks(tests []Test) map[string]*framework.Framework { @@ -75,12 +76,13 @@ func CreateUpgradeFrameworks(tests []Test) map[string]*framework.Framework { // RunUpgradeSuite runs the actual upgrade tests. 
func RunUpgradeSuite( + ctx context.Context, upgCtx *UpgradeContext, tests []Test, testFrameworks map[string]*framework.Framework, testSuite *junit.TestSuite, upgradeType UpgradeType, - upgradeFunc func(), + upgradeFunc func(ctx context.Context), ) { cm := chaosmonkey.New(upgradeFunc) for _, t := range tests { @@ -112,5 +114,5 @@ func RunUpgradeSuite( xml.NewEncoder(f).Encode(testSuite) } }() - cm.Do() + cm.Do(ctx) } diff --git a/test/e2e/windows/cpu_limits.go b/test/e2e/windows/cpu_limits.go index 2fdbbb43d83..6dd54839def 100644 --- a/test/e2e/windows/cpu_limits.go +++ b/test/e2e/windows/cpu_limits.go @@ -45,17 +45,17 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() { ginkgo.It("should not be exceeded after waiting 2 minutes", func(ctx context.Context) { ginkgo.By("Creating one pod with limit set to '0.5'") podsDecimal := newCPUBurnPods(1, powershellImage, "0.5", "1Gi") - e2epod.NewPodClient(f).CreateBatch(podsDecimal) + e2epod.NewPodClient(f).CreateBatch(ctx, podsDecimal) ginkgo.By("Creating one pod with limit set to '500m'") podsMilli := newCPUBurnPods(1, powershellImage, "500m", "1Gi") - e2epod.NewPodClient(f).CreateBatch(podsMilli) + e2epod.NewPodClient(f).CreateBatch(ctx, podsMilli) ginkgo.By("Waiting 2 minutes") time.Sleep(2 * time.Minute) ginkgo.By("Ensuring pods are still running") var allPods [](*v1.Pod) for _, p := range podsDecimal { pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( - context.TODO(), + ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error retrieving pod") @@ -64,7 +64,7 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() { } for _, p := range podsMilli { pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( - context.TODO(), + ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error retrieving pod") @@ -74,7 +74,7 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() { ginkgo.By("Ensuring cpu doesn't exceed limit by >5%") for _, p := range allPods { ginkgo.By("Gathering node summary stats") - nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, p.Spec.NodeName) + nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, p.Spec.NodeName) framework.ExpectNoError(err, "Error grabbing node summary stats") found := false cpuUsage := float64(0) diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go index 7ed40e9fc73..a5edea46b64 100644 --- a/test/e2e/windows/density.go +++ b/test/e2e/windows/density.go @@ -67,7 +67,7 @@ var _ = SIGDescribe("[Feature:Windows] Density [Serial] [Slow]", func() { desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval) ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "batch" - runDensityBatchTest(f, itArg) + runDensityBatchTest(ctx, f, itArg) }) } }) @@ -89,7 +89,7 @@ type densityTest struct { } // runDensityBatchTest runs the density batch pod creation test -func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Duration, []e2emetrics.PodLatencyData) { +func runDensityBatchTest(ctx context.Context, f *framework.Framework, testArg densityTest) (time.Duration, []e2emetrics.PodLatencyData) { const ( podType = "density_test_pod" ) @@ -103,17 +103,17 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura pods := newDensityTestPods(testArg.podsNr, false, imageutils.GetPauseImageName(), podType) // the controller watches the change of pod status - controller := 
newInformerWatchPod(f, mutex, watchTimes, podType) + controller := newInformerWatchPod(ctx, f, mutex, watchTimes, podType) go controller.Run(stopCh) defer close(stopCh) ginkgo.By("Creating a batch of pods") // It returns a map['pod name']'creation time' containing the creation timestamps - createTimes := createBatchPodWithRateControl(f, pods, testArg.interval) + createTimes := createBatchPodWithRateControl(ctx, f, pods, testArg.interval) ginkgo.By("Waiting for all Pods to be observed by the watch...") - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return len(watchTimes) == testArg.podsNr }, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue()) @@ -154,25 +154,25 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura sort.Sort(e2emetrics.LatencySlice(e2eLags)) batchLag := lastRunning.Time.Sub(firstCreate.Time) - deletePodsSync(f, pods) + deletePodsSync(ctx, f, pods) return batchLag, e2eLags } // createBatchPodWithRateControl creates a batch of pods concurrently, uses one goroutine for each creation. // between creations there is an interval for throughput control -func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time { +func createBatchPodWithRateControl(ctx context.Context, f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time { createTimes := make(map[string]metav1.Time) for _, pod := range pods { createTimes[pod.ObjectMeta.Name] = metav1.Now() - go e2epod.NewPodClient(f).Create(pod) + go e2epod.NewPodClient(f).Create(ctx, pod) time.Sleep(interval) } return createTimes } // newInformerWatchPod creates an informer to check whether all pods are running. -func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller { +func newInformerWatchPod(ctx context.Context, f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller { ns := f.Namespace.Name checkPodRunning := func(p *v1.Pod) { mutex.Lock() @@ -190,12 +190,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options) }, }, &v1.Pod{}, @@ -265,7 +265,7 @@ func newDensityTestPods(numPods int, volume bool, imageName, podType string) []* } // deletePodsSync deletes a list of pods and block until pods disappear. 
-func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { +func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod) { var wg sync.WaitGroup for _, pod := range pods { wg.Add(1) @@ -273,10 +273,10 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { defer ginkgo.GinkgoRecover() defer wg.Done() - err := e2epod.NewPodClient(f).Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30)) + err := e2epod.NewPodClient(f).Delete(ctx, pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) - err = e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), + err = e2epod.WaitForPodToDisappear(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), 30*time.Second, 10*time.Minute) framework.ExpectNoError(err) }(pod) diff --git a/test/e2e/windows/device_plugin.go b/test/e2e/windows/device_plugin.go index d657e1b778e..29ac3ef640b 100644 --- a/test/e2e/windows/device_plugin.go +++ b/test/e2e/windows/device_plugin.go @@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:GPUDevicePlugin] Device Plugin", func() { } sysNs := "kube-system" - _, err := cs.AppsV1().DaemonSets(sysNs).Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := cs.AppsV1().DaemonSets(sysNs).Create(ctx, ds, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("creating Windows testing Pod") @@ -104,10 +104,10 @@ var _ = SIGDescribe("[Feature:GPUDevicePlugin] Device Plugin", func() { windowsPod.Spec.Containers[0].Resources.Limits = v1.ResourceList{ "microsoft.com/directx": resource.MustParse("1"), } - windowsPod, err = cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), windowsPod, metav1.CreateOptions{}) + windowsPod, err = cs.CoreV1().Pods(f.Namespace.Name).Create(ctx, windowsPod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for the pod Running") - err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, windowsPod.Name, f.Namespace.Name, testSlowMultiplier*framework.PodStartTimeout) + err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, windowsPod.Name, f.Namespace.Name, testSlowMultiplier*framework.PodStartTimeout) framework.ExpectNoError(err) ginkgo.By("verifying device access in Windows testing Pod") diff --git a/test/e2e/windows/dns.go b/test/e2e/windows/dns.go index c37b0e212cb..500d32c6617 100644 --- a/test/e2e/windows/dns.go +++ b/test/e2e/windows/dns.go @@ -42,7 +42,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() { ginkgo.By("Getting the IP address of the internal Kubernetes service") - svc, err := f.ClientSet.CoreV1().Services("kube-system").Get(context.TODO(), "kube-dns", metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services("kube-system").Get(ctx, "kube-dns", metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Preparing a test DNS service with injected DNS names...") @@ -60,7 +60,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() { testPod.Spec.NodeSelector = map[string]string{ "kubernetes.io/os": "windows", } - testPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testPod, metav1.CreateOptions{}) + testPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, testPod, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("confirming that the pod has a windows label") @@ -68,11 +68,11 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() { framework.Logf("Created pod %v", testPod) defer func() { framework.Logf("Deleting pod %s...", testPod.Name) - if err := 
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testPod.Name, *metav1.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, testPod.Name, *metav1.NewDeleteOptions(0)); err != nil { framework.Failf("Failed to delete pod %s: %v", testPod.Name, err) } }() - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testPod.Name, f.Namespace.Name), "failed to wait for pod %s to be running", testPod.Name) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, testPod.Name, f.Namespace.Name), "failed to wait for pod %s to be running", testPod.Name) // This isn't the best 'test' but it is a great diagnostic, see later test for the 'real' test. ginkgo.By("Calling ipconfig to get debugging info for this pod's DNS and confirm that a dns server 1.1.1.1 can be injected, along with ") diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index a3a06c25af8..bcb2d3667cf 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -99,17 +99,17 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() { defer ginkgo.GinkgoRecover() ginkgo.By("finding the worker node that fulfills this test's assumptions") - nodes := findPreconfiguredGmsaNodes(f.ClientSet) + nodes := findPreconfiguredGmsaNodes(ctx, f.ClientSet) if len(nodes) != 1 { e2eskipper.Skipf("Expected to find exactly one node with the %q label, found %d", gmsaFullNodeLabel, len(nodes)) } node := nodes[0] ginkgo.By("retrieving the contents of the GMSACredentialSpec custom resource manifest from the node") - crdManifestContents := retrieveCRDManifestFileContents(f, node) + crdManifestContents := retrieveCRDManifestFileContents(ctx, f, node) ginkgo.By("deploying the GMSA webhook") - err := deployGmsaWebhook(f) + err := deployGmsaWebhook(ctx, f) if err != nil { framework.Failf(err.Error()) } @@ -121,26 +121,26 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() { } ginkgo.By("creating an RBAC role to grant use access to that GMSA resource") - rbacRoleName, err := createRBACRoleForGmsa(f) + rbacRoleName, err := createRBACRoleForGmsa(ctx, f) if err != nil { framework.Failf(err.Error()) } ginkgo.By("creating a service account") - serviceAccountName := createServiceAccount(f) + serviceAccountName := createServiceAccount(ctx, f) ginkgo.By("binding the RBAC role to the service account") - bindRBACRoleToServiceAccount(f, serviceAccountName, rbacRoleName) + bindRBACRoleToServiceAccount(ctx, f, serviceAccountName, rbacRoleName) ginkgo.By("creating a pod using the GMSA cred spec") - podName := createPodWithGmsa(f, serviceAccountName) + podName := createPodWithGmsa(ctx, f, serviceAccountName) // nltest /QUERY will only return successfully if there is a GMSA // identity configured, _and_ it succeeds in contacting the AD controller // and authenticating with it. 
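// Passing the spec's ctx as Eventually's first argument (as in the hunks below)
// makes the assertion context-aware, so the retry loop stops as soon as the
// test is aborted. A minimal sketch of the resulting call shape:
//
//	gomega.Eventually(ctx, func() bool {
//		_, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", "/QUERY")
//		return err == nil
//	}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue())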
ginkgo.By("checking that nltest /QUERY returns successfully") var output string - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { output, err = runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", "/QUERY") if err != nil { framework.Logf("unable to run command in container via exec: %s", err) @@ -166,17 +166,17 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() { defer ginkgo.GinkgoRecover() ginkgo.By("finding the worker node that fulfills this test's assumptions") - nodes := findPreconfiguredGmsaNodes(f.ClientSet) + nodes := findPreconfiguredGmsaNodes(ctx, f.ClientSet) if len(nodes) != 1 { e2eskipper.Skipf("Expected to find exactly one node with the %q label, found %d", gmsaFullNodeLabel, len(nodes)) } node := nodes[0] ginkgo.By("retrieving the contents of the GMSACredentialSpec custom resource manifest from the node") - crdManifestContents := retrieveCRDManifestFileContents(f, node) + crdManifestContents := retrieveCRDManifestFileContents(ctx, f, node) ginkgo.By("deploying the GMSA webhook") - err := deployGmsaWebhook(f) + err := deployGmsaWebhook(ctx, f) if err != nil { framework.Failf(err.Error()) } @@ -188,26 +188,26 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() { } ginkgo.By("creating an RBAC role to grant use access to that GMSA resource") - rbacRoleName, err := createRBACRoleForGmsa(f) + rbacRoleName, err := createRBACRoleForGmsa(ctx, f) if err != nil { framework.Failf(err.Error()) } ginkgo.By("creating a service account") - serviceAccountName := createServiceAccount(f) + serviceAccountName := createServiceAccount(ctx, f) ginkgo.By("binding the RBAC role to the service account") - bindRBACRoleToServiceAccount(f, serviceAccountName, rbacRoleName) + bindRBACRoleToServiceAccount(ctx, f, serviceAccountName, rbacRoleName) ginkgo.By("creating a pod using the GMSA cred spec") - podName := createPodWithGmsa(f, serviceAccountName) + podName := createPodWithGmsa(ctx, f, serviceAccountName) ginkgo.By("getting the ip of GMSA domain") gmsaDomainIP := getGmsaDomainIP(f, podName) ginkgo.By("checking that file can be read and write from the remote folder successfully") filePath := fmt.Sprintf("\\\\%s\\%s\\write-test-%s.txt", gmsaDomainIP, gmsaSharedFolder, string(uuid.NewUUID())[0:4]) - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { // The filePath is a remote folder, do not change the format of it _, _ = runKubectlExecInNamespace(f.Namespace.Name, podName, "--", "powershell.exe", "-Command", "echo 'This is a test file.' > "+filePath) output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "powershell.exe", "--", "cat", filePath) @@ -229,11 +229,11 @@ func isValidOutput(output string) bool { } // findPreconfiguredGmsaNode finds node with the gmsaFullNodeLabel label on it. -func findPreconfiguredGmsaNodes(c clientset.Interface) []v1.Node { +func findPreconfiguredGmsaNodes(ctx context.Context, c clientset.Interface) []v1.Node { nodeOpts := metav1.ListOptions{ LabelSelector: gmsaFullNodeLabel, } - nodes, err := c.CoreV1().Nodes().List(context.TODO(), nodeOpts) + nodes, err := c.CoreV1().Nodes().List(ctx, nodeOpts) if err != nil { framework.Failf("Unable to list nodes: %v", err) } @@ -245,7 +245,7 @@ func findPreconfiguredGmsaNodes(c clientset.Interface) []v1.Node { // on nodes with the gmsaFullNodeLabel label with that file's directory // mounted on it, and then exec-ing into that pod to retrieve the file's // contents. 
-func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) string { +func retrieveCRDManifestFileContents(ctx context.Context, f *framework.Framework, node v1.Node) string { podName := "retrieve-gmsa-crd-contents" // we can't use filepath.Dir here since the test itself runs on a Linux machine splitPath := strings.Split(gmsaCrdManifestPath, `\`) @@ -283,7 +283,7 @@ func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) strin }, }, } - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "cmd", "/S", "/C", fmt.Sprintf("type %s", gmsaCrdManifestPath)) if err != nil { @@ -297,7 +297,7 @@ func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) strin // deployGmsaWebhook deploys the GMSA webhook, and returns a cleanup function // to be called when done with testing, that removes the temp files it's created // on disks as well as the API resources it's created. -func deployGmsaWebhook(f *framework.Framework) error { +func deployGmsaWebhook(ctx context.Context, f *framework.Framework) error { deployerName := "webhook-deployer" deployerNamespace := f.Namespace.Name webHookName := "gmsa-webhook" @@ -317,8 +317,8 @@ func deployGmsaWebhook(f *framework.Framework) error { }) // ensure the deployer has ability to approve certificatesigningrequests to install the webhook - s := createServiceAccount(f) - bindClusterRBACRoleToServiceAccount(f, s, "cluster-admin") + s := createServiceAccount(ctx, f) + bindClusterRBACRoleToServiceAccount(ctx, f, s, "cluster-admin") installSteps := []string{ "echo \"@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing/\" >> /etc/apk/repositories", @@ -357,11 +357,11 @@ func deployGmsaWebhook(f *framework.Framework) error { }, }, } - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) // Wait for the Webhook deployment to become ready. The deployer pod takes a few seconds to initialize and create resources err := waitForDeployment(func() (*appsv1.Deployment, error) { - return f.ClientSet.AppsV1().Deployments(webHookNamespace).Get(context.TODO(), webHookName, metav1.GetOptions{}) + return f.ClientSet.AppsV1().Deployments(webHookNamespace).Get(ctx, webHookName, metav1.GetOptions{}) }, 10*time.Second, f.Timeouts.PodStart) if err == nil { framework.Logf("GMSA webhook successfully deployed") @@ -370,7 +370,7 @@ func deployGmsaWebhook(f *framework.Framework) error { } // Dump deployer logs - logs, _ := e2epod.GetPodLogs(f.ClientSet, deployerNamespace, deployerName, deployerName) + logs, _ := e2epod.GetPodLogs(ctx, f.ClientSet, deployerNamespace, deployerName, deployerName) framework.Logf("GMSA deployment logs:\n%s", logs) return err @@ -409,7 +409,7 @@ func createGmsaCustomResource(ns string, crdManifestContents string) error { // createRBACRoleForGmsa creates an RBAC cluster role to grant use // access to our test credential spec. // It returns the role's name, as well as a function to delete it when done. 
-func createRBACRoleForGmsa(f *framework.Framework) (string, error) { +func createRBACRoleForGmsa(ctx context.Context, f *framework.Framework) (string, error) { roleName := f.Namespace.Name + "-rbac-role" role := &rbacv1.ClusterRole{ @@ -427,7 +427,7 @@ func createRBACRoleForGmsa(f *framework.Framework) (string, error) { } ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.RbacV1().ClusterRoles().Delete), roleName, metav1.DeleteOptions{}) - _, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{}) + _, err := f.ClientSet.RbacV1().ClusterRoles().Create(ctx, role, metav1.CreateOptions{}) if err != nil { err = fmt.Errorf("unable to create RBAC cluster role %q: %w", roleName, err) } @@ -436,7 +436,7 @@ func createRBACRoleForGmsa(f *framework.Framework) (string, error) { } // createServiceAccount creates a service account, and returns its name. -func createServiceAccount(f *framework.Framework) string { +func createServiceAccount(ctx context.Context, f *framework.Framework) string { accountName := f.Namespace.Name + "-sa-" + string(uuid.NewUUID()) account := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -444,14 +444,14 @@ func createServiceAccount(f *framework.Framework) string { Namespace: f.Namespace.Name, }, } - if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), account, metav1.CreateOptions{}); err != nil { + if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, account, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create service account %q: %v", accountName, err) } return accountName } // bindRBACRoleToServiceAccount binds the given RBAC cluster role to the given service account. -func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) { +func bindRBACRoleToServiceAccount(ctx context.Context, f *framework.Framework, serviceAccountName, rbacRoleName string) { binding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: f.Namespace.Name + "-rbac-binding", @@ -470,11 +470,11 @@ func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rb Name: rbacRoleName, }, } - _, err := f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{}) + _, err := f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(ctx, binding, metav1.CreateOptions{}) framework.ExpectNoError(err) } -func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) { +func bindClusterRBACRoleToServiceAccount(ctx context.Context, f *framework.Framework, serviceAccountName, rbacRoleName string) { binding := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: f.Namespace.Name + "-rbac-binding", @@ -493,12 +493,12 @@ func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountN Name: rbacRoleName, }, } - _, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{}) + _, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{}) framework.ExpectNoError(err) } // createPodWithGmsa creates a pod using the test GMSA cred spec, and returns its name. 
-func createPodWithGmsa(f *framework.Framework, serviceAccountName string) string { +func createPodWithGmsa(ctx context.Context, f *framework.Framework, serviceAccountName string) string { podName := "pod-with-gmsa" credSpecName := gmsaCustomResourceName @@ -527,7 +527,7 @@ func createPodWithGmsa(f *framework.Framework, serviceAccountName string) string }, }, } - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) return podName } diff --git a/test/e2e/windows/gmsa_kubelet.go b/test/e2e/windows/gmsa_kubelet.go index beb28184f18..50277d08718 100644 --- a/test/e2e/windows/gmsa_kubelet.go +++ b/test/e2e/windows/gmsa_kubelet.go @@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Kubelet [Slow]", func() { } ginkgo.By("creating a pod with correct GMSA specs") - e2epod.NewPodClient(f).CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("checking the domain reported by nltest in the containers") namespaceOption := fmt.Sprintf("--namespace=%s", f.Namespace.Name) @@ -112,7 +112,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Kubelet [Slow]", func() { // even for bogus creds, `nltest /PARENTDOMAIN` simply returns the AD domain, which is enough for our purpose here. // note that the "eventually" part seems to be needed to account for the fact that powershell containers // are a bit slow to become responsive, even when docker reports them as running. - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { output, err = e2ekubectl.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN") return err == nil }, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) diff --git a/test/e2e/windows/host_process.go b/test/e2e/windows/host_process.go index 9282c002f4b..dfde454460c 100644 --- a/test/e2e/windows/host_process.go +++ b/test/e2e/windows/host_process.go @@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi ginkgo.It("should run as a process on the host/node", func(ctx context.Context) { ginkgo.By("selecting a Windows node") - targetNode, err := findWindowsNode(f) + targetNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "Error finding Windows node") framework.Logf("Using node: %v", targetNode.Name) @@ -128,14 +128,14 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).Create(ctx, pod) ginkgo.By("Waiting for pod to run") - e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( - context.TODO(), + ctx, podName, metav1.GetOptions{}) @@ -180,21 +180,21 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).Create(ctx, pod) ginkgo.By("Waiting for pod to run") - e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( - context.TODO(), + ctx, podName, metav1.GetOptions{}) framework.ExpectNoError(err, "Error retrieving pod") if p.Status.Phase != v1.PodSucceeded { - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, 
"read-configuration") + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, "read-configuration") if err != nil { framework.Logf("Error pulling logs: %v", err) } @@ -212,7 +212,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi // See https://github.com/kubernetes/enhancements/blob/master/keps/sig-windows/1981-windows-privileged-container-support/README.md // for more details. ginkgo.By("Ensuring Windows nodes are running containerd v1.6.x") - windowsNode, err := findWindowsNode(f) + windowsNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "error finding Windows node") r, v, err := getNodeContainerRuntimeAndVersion(windowsNode) framework.ExpectNoError(err, "error getting node container runtime and version") @@ -418,14 +418,14 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, }, } - e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).Create(ctx, pod) ginkgo.By(fmt.Sprintf("Waiting for pod '%s' to run", podName)) - e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( - context.TODO(), + ctx, podName, metav1.GetOptions{}) @@ -440,7 +440,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi "involvedObject.namespace": f.Namespace.Name, }.AsSelector().String(), } - events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), options) + events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, options) framework.ExpectNoError(err, "Error getting events for failed pod") for _, event := range events.Items { framework.Logf("%s: %s", event.Reason, event.Message) @@ -468,7 +468,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi "validation-script": validation_script, }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(ctx, configMap, metav1.CreateOptions{}) framework.ExpectNoError(err, "unable to create create configmap") ginkgo.By("Creating a secret containing test data") @@ -485,7 +485,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi "foo": []byte("bar"), }, } - _, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(ctx, secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "unable to create secret") ginkgo.By("Creating a pod with a HostProcess container that uses various types of volume mounts") @@ -493,18 +493,18 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi podAndContainerName := "host-process-volume-mounts" pod := makeTestPodWithVolumeMounts(podAndContainerName) - e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).Create(ctx, pod) ginkgo.By("Waiting for pod to run") - e2epod.NewPodClient(f).WaitForFinish(podAndContainerName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(ctx, podAndContainerName, 3*time.Minute) - logs, err := e2epod.GetPodLogs(f.ClientSet, ns.Name, podAndContainerName, podAndContainerName) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns.Name, podAndContainerName, podAndContainerName) framework.ExpectNoError(err, "Error getting pod logs") 
framework.Logf("Container logs: %s", logs) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(ns.Name).Get( - context.TODO(), + ctx, podAndContainerName, metav1.GetOptions{}) @@ -514,12 +514,12 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi ginkgo.It("metrics should report count of started and failed to start HostProcess containers", func(ctx context.Context) { ginkgo.By("Selecting a Windows node") - targetNode, err := findWindowsNode(f) + targetNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "Error finding Windows node") framework.Logf("Using node: %v", targetNode.Name) ginkgo.By("Getting initial kubelet metrics values") - beforeMetrics, err := getCurrentHostProcessMetrics(f, targetNode.Name) + beforeMetrics, err := getCurrentHostProcessMetrics(ctx, f, targetNode.Name) framework.ExpectNoError(err, "Error getting initial kubelet metrics for node") framework.Logf("Initial HostProcess container metrics -- StartedContainers: %v, StartedContainersErrors: %v, StartedInitContainers: %v, StartedInitContainersErrors: %v", beforeMetrics.StartedContainersCount, beforeMetrics.StartedContainersErrorCount, beforeMetrics.StartedInitContainersCount, beforeMetrics.StartedInitContainersErrorCount) @@ -565,8 +565,8 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - e2epod.NewPodClient(f).Create(pod) - e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).Create(ctx, pod) + e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute) ginkgo.By("Scheduling a pod with a HostProcess container that will fail") podName = "host-process-metrics-pod-failing-container" @@ -599,12 +599,12 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - e2epod.NewPodClient(f).Create(pod) - e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).Create(ctx, pod) + e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute) ginkgo.By("Getting subsequent kubelet metrics values") - afterMetrics, err := getCurrentHostProcessMetrics(f, targetNode.Name) + afterMetrics, err := getCurrentHostProcessMetrics(ctx, f, targetNode.Name) framework.ExpectNoError(err, "Error getting subsequent kubelet metrics for node") framework.Logf("Subsequent HostProcess container metrics -- StartedContainers: %v, StartedContainersErrors: %v, StartedInitContainers: %v, StartedInitContainersErrors: %v", afterMetrics.StartedContainersCount, afterMetrics.StartedContainersErrorCount, afterMetrics.StartedInitContainersCount, afterMetrics.StartedInitContainersErrorCount) @@ -620,7 +620,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi ginkgo.It("container stats validation", func(ctx context.Context) { ginkgo.By("selecting a Windows node") - targetNode, err := findWindowsNode(f) + targetNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "Error finding Windows node") framework.Logf("Using node: %v", targetNode.Name) @@ -651,14 +651,14 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).Create(ctx, pod) ginkgo.By("Waiting for the pod to start running") timeout := 3 * time.Minute - e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, timeout, make(map[string]string)) + e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, timeout, 
make(map[string]string)) ginkgo.By("Getting container stats for pod") - nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, targetNode.Name) + nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, targetNode.Name) framework.ExpectNoError(err, "Error getting node stats") statsChecked := false @@ -700,7 +700,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi ginkgo.It("should support querying api-server using in-cluster config", func(ctx context.Context) { // This functionality is only supported on containerd v1.7+ ginkgo.By("Ensuring Windows nodes are running containerd v1.7+") - windowsNode, err := findWindowsNode(f) + windowsNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "error finding Windows node") r, v, err := getNodeContainerRuntimeAndVersion(windowsNode) framework.ExpectNoError(err, "error getting node container runtime and version") @@ -748,10 +748,10 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi } pc := e2epod.NewPodClient(f) - pc.Create(pod) + pc.Create(ctx, pod) ginkgo.By("Waiting for pod to run") - e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, 3*time.Minute, make(map[string]string)) + e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, 3*time.Minute, make(map[string]string)) ginkgo.By("Waiting for 60 seconds") // We wait an additional 60 seconds after the pod is Running because the @@ -760,7 +760,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi time.Sleep(60 * time.Second) ginkgo.By("Ensuring the test app was able to successfully query the api-server") - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, "hpc-agnhost") + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, "hpc-agnhost") framework.ExpectNoError(err, "Error getting pod logs") framework.Logf("Logs: %s\n", logs) @@ -779,7 +779,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi ginkgo.It("should run as localgroup accounts", func(ctx context.Context) { // This functionality is only supported on containerd v1.7+ ginkgo.By("Ensuring Windows nodes are running containerd v1.7+") - windowsNode, err := findWindowsNode(f) + windowsNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "error finding Windows node") r, v, err := getNodeContainerRuntimeAndVersion(windowsNode) framework.ExpectNoError(err, "error getting node container runtime and version") @@ -835,10 +835,10 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).Create(ctx, pod) ginkgo.By("Waiting for pod to run") - e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( @@ -854,7 +854,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi // because all of the 'built-in' accounts that can be used with HostProcess // are prefixed with this.
ginkgo.By("Then ensuring pod was not running as a system account") - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, "localgroup-container") + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, "localgroup-container") framework.ExpectNoError(err, "error retrieving container logs") framework.Logf("Pod logs: %s", logs) framework.ExpectEqual( @@ -1005,10 +1005,10 @@ type HostProcessContainersMetrics struct { // getCurrentHostProcessMetrics returns a HostPRocessContainersMetrics object. Any metrics that do not have any // values reported will be set to 0. -func getCurrentHostProcessMetrics(f *framework.Framework, nodeName string) (HostProcessContainersMetrics, error) { +func getCurrentHostProcessMetrics(ctx context.Context, f *framework.Framework, nodeName string) (HostProcessContainersMetrics, error) { var result HostProcessContainersMetrics - metrics, err := e2emetrics.GetKubeletMetrics(f.ClientSet, nodeName) + metrics, err := e2emetrics.GetKubeletMetrics(ctx, f.ClientSet, nodeName) if err != nil { return result, err } diff --git a/test/e2e/windows/hybrid_network.go b/test/e2e/windows/hybrid_network.go index 7614f0b03fb..3a65e7a83b9 100644 --- a/test/e2e/windows/hybrid_network.go +++ b/test/e2e/windows/hybrid_network.go @@ -58,44 +58,44 @@ var _ = SIGDescribe("Hybrid cluster network", func() { linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS) ginkgo.By("creating a linux pod and waiting for it to be running") - linuxPod = e2epod.NewPodClient(f).CreateSync(linuxPod) + linuxPod = e2epod.NewPodClient(f).CreateSync(ctx, linuxPod) windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS) windowsPod.Spec.Containers[0].Args = []string{"test-webserver"} ginkgo.By("creating a windows pod and waiting for it to be running") - windowsPod = e2epod.NewPodClient(f).CreateSync(windowsPod) + windowsPod = e2epod.NewPodClient(f).CreateSync(ctx, windowsPod) ginkgo.By("verifying pod internal connectivity to the cluster dataplane") ginkgo.By("checking connectivity from Linux to Windows") - assertConsistentConnectivity(f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck(windowsPod.Status.PodIP, 80)) + assertConsistentConnectivity(ctx, f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck(windowsPod.Status.PodIP, 80)) ginkgo.By("checking connectivity from Windows to Linux") - assertConsistentConnectivity(f, windowsPod.ObjectMeta.Name, windowsOS, windowsCheck(linuxPod.Status.PodIP)) + assertConsistentConnectivity(ctx, f, windowsPod.ObjectMeta.Name, windowsOS, windowsCheck(linuxPod.Status.PodIP)) }) ginkgo.It("should provide Internet connection for Linux containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) { linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS) ginkgo.By("creating a linux pod and waiting for it to be running") - linuxPod = e2epod.NewPodClient(f).CreateSync(linuxPod) + linuxPod = e2epod.NewPodClient(f).CreateSync(ctx, linuxPod) ginkgo.By("verifying pod external connectivity to the internet") ginkgo.By("checking connectivity to 8.8.8.8 53 (google.com) from Linux") - assertConsistentConnectivity(f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck("8.8.8.8", 53)) + assertConsistentConnectivity(ctx, f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck("8.8.8.8", 53)) }) ginkgo.It("should provide Internet connection for Windows containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) { windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS) ginkgo.By("creating a windows pod and waiting for it to be running") - 
windowsPod = e2epod.NewPodClient(f).CreateSync(windowsPod) + windowsPod = e2epod.NewPodClient(f).CreateSync(ctx, windowsPod) ginkgo.By("verifying pod external connectivity to the internet") ginkgo.By("checking connectivity to 8.8.8.8 53 (google.com) from Windows") - assertConsistentConnectivity(f, windowsPod.ObjectMeta.Name, windowsOS, windowsCheck("www.google.com")) + assertConsistentConnectivity(ctx, f, windowsPod.ObjectMeta.Name, windowsOS, windowsCheck("www.google.com")) }) }) @@ -107,7 +107,7 @@ var ( timeoutSeconds = 10 ) -func assertConsistentConnectivity(f *framework.Framework, podName string, os string, cmd []string) { +func assertConsistentConnectivity(ctx context.Context, f *framework.Framework, podName string, os string, cmd []string) { connChecker := func() error { ginkgo.By(fmt.Sprintf("checking connectivity of %s-container in %s", os, podName)) // TODO, we should be retrying this similar to what is done in DialFromNode, in the test/e2e/networking/networking.go tests @@ -117,8 +117,8 @@ func assertConsistentConnectivity(f *framework.Framework, podName string, os str } return err } - gomega.Eventually(connChecker, duration, pollInterval).ShouldNot(gomega.HaveOccurred()) - gomega.Consistently(connChecker, duration, pollInterval).ShouldNot(gomega.HaveOccurred()) + gomega.Eventually(ctx, connChecker, duration, pollInterval).ShouldNot(gomega.HaveOccurred()) + gomega.Consistently(ctx, connChecker, duration, pollInterval).ShouldNot(gomega.HaveOccurred()) } func linuxCheck(address string, port int) []string { diff --git a/test/e2e/windows/kubelet_stats.go b/test/e2e/windows/kubelet_stats.go index bfb20312892..f9c90940e60 100644 --- a/test/e2e/windows/kubelet_stats.go +++ b/test/e2e/windows/kubelet_stats.go @@ -47,18 +47,18 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() { ginkgo.It("should return within 10 seconds", func(ctx context.Context) { ginkgo.By("Selecting a Windows node") - targetNode, err := findWindowsNode(f) + targetNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "Error finding Windows node") framework.Logf("Using node: %v", targetNode.Name) ginkgo.By("Scheduling 10 pods") powershellImage := imageutils.GetConfig(imageutils.BusyBox) pods := newKubeletStatsTestPods(10, powershellImage, targetNode.Name) - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) ginkgo.By("Waiting up to 3 minutes for pods to be running") timeout := 3 * time.Minute - err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string)) + err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string)) framework.ExpectNoError(err) ginkgo.By("Getting kubelet stats 5 times and checking average duration") @@ -67,7 +67,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() { for i := 0; i < iterations; i++ { start := time.Now() - nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, targetNode.Name) + nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, targetNode.Name) duration := time.Since(start) totalDurationMs += duration.Milliseconds() @@ -122,7 +122,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() { ginkgo.Context("when windows is booted", func() { ginkgo.It("should return bootid within 10 seconds", func(ctx context.Context) { ginkgo.By("Selecting a Windows node") - targetNode, err := findWindowsNode(f) + targetNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, 
"Error finding Windows node") framework.Logf("Using node: %v", targetNode.Name) @@ -138,18 +138,18 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() { ginkgo.It("should return within 10 seconds", func(ctx context.Context) { ginkgo.By("Selecting a Windows node") - targetNode, err := findWindowsNode(f) + targetNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "Error finding Windows node") framework.Logf("Using node: %v", targetNode.Name) ginkgo.By("Scheduling 3 pods") powershellImage := imageutils.GetConfig(imageutils.BusyBox) pods := newKubeletStatsTestPods(3, powershellImage, targetNode.Name) - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) ginkgo.By("Waiting up to 3 minutes for pods to be running") timeout := 3 * time.Minute - err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string)) + err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string)) framework.ExpectNoError(err) ginkgo.By("Getting kubelet stats 1 time") @@ -158,7 +158,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() { for i := 0; i < iterations; i++ { start := time.Now() - nodeStats, err := e2ekubelet.GetStatsSummary(f.ClientSet, targetNode.Name) + nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, targetNode.Name) duration := time.Since(start) totalDurationMs += duration.Milliseconds() @@ -206,9 +206,9 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() { }) // findWindowsNode finds a Windows node that is Ready and Schedulable -func findWindowsNode(f *framework.Framework) (v1.Node, error) { +func findWindowsNode(ctx context.Context, f *framework.Framework) (v1.Node, error) { selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector() - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return v1.Node{}, err diff --git a/test/e2e/windows/memory_limits.go b/test/e2e/windows/memory_limits.go index a7950d68dfb..60c7c3622af 100644 --- a/test/e2e/windows/memory_limits.go +++ b/test/e2e/windows/memory_limits.go @@ -52,13 +52,13 @@ var _ = SIGDescribe("[Feature:Windows] Memory Limits [Serial] [Slow]", func() { ginkgo.Context("Allocatable node memory", func() { ginkgo.It("should be equal to a calculated allocatable memory value", func(ctx context.Context) { - checkNodeAllocatableTest(f) + checkNodeAllocatableTest(ctx, f) }) }) ginkgo.Context("attempt to deploy past allocatable memory limits", func() { ginkgo.It("should fail deployments of pods once there isn't enough memory", func(ctx context.Context) { - overrideAllocatableMemoryTest(f, framework.TestContext.CloudConfig.NumNodes) + overrideAllocatableMemoryTest(ctx, f, framework.TestContext.CloudConfig.NumNodes) }) }) @@ -81,9 +81,9 @@ type nodeMemory struct { // runDensityBatchTest runs the density batch pod creation test // checks that a calculated value for NodeAllocatable is equal to the reported value -func checkNodeAllocatableTest(f *framework.Framework) { +func checkNodeAllocatableTest(ctx context.Context, f *framework.Framework) { - nodeMem := getNodeMemory(f) + nodeMem := getNodeMemory(ctx, f) framework.Logf("nodeMem says: %+v", nodeMem) // calculate the allocatable mem based on capacity - reserved amounts @@ -101,9 +101,9 @@ func checkNodeAllocatableTest(f 
*framework.Framework) { // Deploys `allocatablePods + 1` pods, each with a memory limit of `1/allocatablePods` of the total allocatable // memory, then confirms that the last pod failed because of failedScheduling -func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int) { +func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework, allocatablePods int) { selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector() - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{ LabelSelector: selector.String(), }) framework.ExpectNoError(err) @@ -133,7 +133,7 @@ func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int) NodeName: node.Name, }, } - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) } podName := "mem-failure-pod" @@ -158,10 +158,10 @@ func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int) }, }, } - failurePod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), failurePod, metav1.CreateOptions{}) + failurePod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, failurePod, metav1.CreateOptions{}) framework.ExpectNoError(err) - gomega.Eventually(func() bool { - eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + gomega.Eventually(ctx, func() bool { + eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, e := range eventList.Items { // Look for an event that shows FailedScheduling @@ -176,9 +176,9 @@ func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int) } // getNodeMemory populates a nodeMemory struct with information from the first -func getNodeMemory(f *framework.Framework) nodeMemory { +func getNodeMemory(ctx context.Context, f *framework.Framework) nodeMemory { selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector() - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{ LabelSelector: selector.String(), }) framework.ExpectNoError(err) @@ -193,7 +193,7 @@ func getNodeMemory(f *framework.Framework) nodeMemory { framework.Logf("Getting configuration details for node %s", nodeName) request := f.ClientSet.CoreV1().RESTClient().Get().Resource("nodes").Name(nodeName).SubResource("proxy").Suffix("configz") - rawbytes, err := request.DoRaw(context.Background()) + rawbytes, err := request.DoRaw(ctx) framework.ExpectNoError(err) kubeletConfig, err := decodeConfigz(rawbytes) framework.ExpectNoError(err) diff --git a/test/e2e/windows/reboot_node.go b/test/e2e/windows/reboot_node.go index 6f0ca3f4b43..b14ebb90554 100644 --- a/test/e2e/windows/reboot_node.go +++ b/test/e2e/windows/reboot_node.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV ginkgo.It("should run as a reboot process on the host/node", func(ctx context.Context) { ginkgo.By("selecting a Windows node") - targetNode, err := findWindowsNode(f) + targetNode, err := findWindowsNode(ctx, f) framework.ExpectNoError(err, "Error finding Windows node") framework.Logf("Using node: %v", targetNode.Name) 
@@ -74,7 +74,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV } agnPod.Spec.Containers[0].Args = []string{"test-webserver"} ginkgo.By("creating a windows pod and waiting for it to be running") - agnPod = e2epod.NewPodClient(f).CreateSync(agnPod) + agnPod = e2epod.NewPodClient(f).CreateSync(ctx, agnPod) // Create Linux pod to ping the windows pod linuxBusyBoxImage := imageutils.GetE2EImage(imageutils.Nginx) @@ -107,16 +107,16 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV }, } ginkgo.By("Waiting for the Linux pod to run") - nginxPod = e2epod.NewPodClient(f).CreateSync(nginxPod) + nginxPod = e2epod.NewPodClient(f).CreateSync(ctx, nginxPod) ginkgo.By("checking connectivity to 8.8.8.8 53 (google.com) from Linux") - assertConsistentConnectivity(f, nginxPod.ObjectMeta.Name, "linux", linuxCheck("8.8.8.8", 53)) + assertConsistentConnectivity(ctx, f, nginxPod.ObjectMeta.Name, "linux", linuxCheck("8.8.8.8", 53)) ginkgo.By("checking connectivity to www.google.com from Windows") - assertConsistentConnectivity(f, agnPod.ObjectMeta.Name, "windows", windowsCheck("www.google.com")) + assertConsistentConnectivity(ctx, f, agnPod.ObjectMeta.Name, "windows", windowsCheck("www.google.com")) ginkgo.By("checking connectivity from Linux to Windows for the first time") - assertConsistentConnectivity(f, nginxPod.ObjectMeta.Name, "linux", linuxCheck(agnPod.Status.PodIP, 80)) + assertConsistentConnectivity(ctx, f, nginxPod.ObjectMeta.Name, "linux", linuxCheck(agnPod.Status.PodIP, 80)) initialRestartCount := podutil.GetExistingContainerStatus(agnPod.Status.ContainerStatuses, "windows-container").RestartCount @@ -156,14 +156,14 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV }, } - e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).Create(ctx, pod) ginkgo.By("Waiting for pod to run") - e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(ctx, podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( - context.TODO(), + ctx, podName, metav1.GetOptions{}) @@ -185,7 +185,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV break FOR } ginkgo.By("Then checking existed agn-test-pod is running on the rebooted host") - agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), agnPod.Name, metav1.GetOptions{}) + agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, agnPod.Name, metav1.GetOptions{}) if err == nil { lastRestartCount := podutil.GetExistingContainerStatus(agnPodOut.Status.ContainerStatuses, "windows-container").RestartCount restartCount = int(lastRestartCount - initialRestartCount) @@ -197,10 +197,10 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV ginkgo.By("Checking whether agn-test-pod is rebooted") framework.ExpectEqual(restartCount, 1) - agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), agnPod.Name, metav1.GetOptions{}) + agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, agnPod.Name, metav1.GetOptions{}) framework.ExpectEqual(agnPodOut.Status.Phase, v1.PodRunning) framework.ExpectNoError(err, "getting pod info after reboot") - assertConsistentConnectivity(f, nginxPod.ObjectMeta.Name, "linux", linuxCheck(agnPodOut.Status.PodIP, 80)) + assertConsistentConnectivity(ctx, f, nginxPod.ObjectMeta.Name, 
"linux", linuxCheck(agnPodOut.Status.PodIP, 80)) // create another host process pod to check system boot time checkPod := &v1.Pod{ @@ -239,14 +239,14 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV }, } - e2epod.NewPodClient(f).Create(checkPod) + e2epod.NewPodClient(f).Create(ctx, checkPod) ginkgo.By("Waiting for pod to run") - e2epod.NewPodClient(f).WaitForFinish("check-reboot-pod", 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(ctx, "check-reboot-pod", 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( - context.TODO(), + ctx, "check-reboot-pod", metav1.GetOptions{}) diff --git a/test/e2e/windows/security_context.go b/test/e2e/windows/security_context.go index 887c6179bb9..daff717bea1 100644 --- a/test/e2e/windows/security_context.go +++ b/test/e2e/windows/security_context.go @@ -47,15 +47,15 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { ginkgo.It("should be able create pods and run containers with a given username", func(ctx context.Context) { ginkgo.By("Creating 2 pods: 1 with the default user, and one with a custom one.") podDefault := runAsUserNamePod(nil) - e2eoutput.TestContainerOutput(f, "check default user", podDefault, 0, []string{"ContainerUser"}) + e2eoutput.TestContainerOutput(ctx, f, "check default user", podDefault, 0, []string{"ContainerUser"}) podUserName := runAsUserNamePod(toPtr("ContainerAdministrator")) - e2eoutput.TestContainerOutput(f, "check set user", podUserName, 0, []string{"ContainerAdministrator"}) + e2eoutput.TestContainerOutput(ctx, f, "check set user", podUserName, 0, []string{"ContainerAdministrator"}) }) ginkgo.It("should not be able to create pods with unknown usernames at Pod level", func(ctx context.Context) { ginkgo.By("Creating a pod with an invalid username") - podInvalid := e2epod.NewPodClient(f).Create(runAsUserNamePod(toPtr("FooLish"))) + podInvalid := e2epod.NewPodClient(f).Create(ctx, runAsUserNamePod(toPtr("FooLish"))) failedSandboxEventSelector := fields.Set{ "involvedObject.kind": "Pod", @@ -72,8 +72,8 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { // Not all runtimes use the sandbox information. This means the test needs to check if the pod // sandbox failed or workload pod failed. framework.Logf("Waiting for pod %s to enter the error state.", podInvalid.Name) - gomega.Eventually(func() bool { - failedSandbox, err := eventOccurred(f.ClientSet, podInvalid.Namespace, failedSandboxEventSelector, hcsschimError) + gomega.Eventually(ctx, func(ctx context.Context) bool { + failedSandbox, err := eventOccurred(ctx, f.ClientSet, podInvalid.Namespace, failedSandboxEventSelector, hcsschimError) if err != nil { framework.Logf("Error retrieving events for pod. Ignoring...") } @@ -83,7 +83,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { } framework.Logf("No Sandbox error found. 
Looking for failure in workload pods") - pod, err := e2epod.NewPodClient(f).Get(context.Background(), podInvalid.Name, metav1.GetOptions{}) + pod, err := e2epod.NewPodClient(f).Get(ctx, podInvalid.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Error retrieving pod: %s", err) return false @@ -104,12 +104,12 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { ginkgo.By("Creating a pod with an invalid username at container level and pod running as ContainerUser") p := runAsUserNamePod(toPtr("FooLish")) p.Spec.SecurityContext.WindowsOptions.RunAsUserName = toPtr("ContainerUser") - podInvalid := e2epod.NewPodClient(f).Create(p) + podInvalid := e2epod.NewPodClient(f).Create(ctx, p) framework.Logf("Waiting for pod %s to enter the error state.", podInvalid.Name) - framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, podInvalid.Name, "", f.Namespace.Name)) + framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, podInvalid.Name, "", f.Namespace.Name)) - podInvalid, _ = e2epod.NewPodClient(f).Get(context.TODO(), podInvalid.Name, metav1.GetOptions{}) + podInvalid, _ = e2epod.NewPodClient(f).Get(ctx, podInvalid.Name, metav1.GetOptions{}) podTerminatedReason := testutils.TerminatedContainers(podInvalid)[runAsUserNameContainerName] if podTerminatedReason != "ContainerCannotRun" && podTerminatedReason != "StartError" { framework.Failf("The container terminated reason was supposed to be: 'ContainerCannotRun' or 'StartError', not: '%q'", podTerminatedReason) @@ -127,8 +127,8 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { Command: []string{"cmd", "/S", "/C", "echo %username%"}, }) - e2eoutput.TestContainerOutput(f, "check overridden username", pod, 0, []string{"ContainerUser"}) - e2eoutput.TestContainerOutput(f, "check pod SecurityContext username", pod, 1, []string{"ContainerAdministrator"}) + e2eoutput.TestContainerOutput(ctx, f, "check overridden username", pod, 0, []string{"ContainerUser"}) + e2eoutput.TestContainerOutput(ctx, f, "check pod SecurityContext username", pod, 1, []string{"ContainerAdministrator"}) }) ginkgo.It("should ignore Linux Specific SecurityContext if set", func(ctx context.Context) { @@ -147,11 +147,11 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { SELinuxOptions: &v1.SELinuxOptions{Level: "s0:c24,c9"}, WindowsOptions: &v1.WindowsSecurityContextOptions{RunAsUserName: &containerUserName}} windowsPodWithSELinux.Spec.Tolerations = []v1.Toleration{{Key: "os", Value: "Windows"}} - windowsPodWithSELinux, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), + windowsPodWithSELinux, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, windowsPodWithSELinux, metav1.CreateOptions{}) framework.ExpectNoError(err) framework.Logf("Created pod %v", windowsPodWithSELinux) - framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, windowsPodWithSELinux.Name, + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, windowsPodWithSELinux.Name, f.Namespace.Name), "failed to wait for pod %s to be running", windowsPodWithSELinux.Name) }) @@ -161,11 +161,11 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { p := runAsUserNamePod(toPtr("ContainerAdministrator")) p.Spec.SecurityContext.RunAsNonRoot = &trueVar - podInvalid, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), p, metav1.CreateOptions{}) + podInvalid, err := 
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, p, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pod") ginkgo.By("Waiting for pod to finish") - event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(podInvalid) + event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, podInvalid) framework.ExpectNoError(err) framework.ExpectNotEqual(event, nil, "event should not be empty") framework.Logf("Got event: %v", event) @@ -179,11 +179,11 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { p := runAsUserNamePod(toPtr("CONTAINERADMINISTRATOR")) p.Spec.SecurityContext.RunAsNonRoot = &trueVar - podInvalid, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), p, metav1.CreateOptions{}) + podInvalid, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, p, metav1.CreateOptions{}) framework.ExpectNoError(err, "Error creating pod") ginkgo.By("Waiting for pod to finish") - event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(podInvalid) + event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, podInvalid) framework.ExpectNoError(err) framework.ExpectNotEqual(event, nil, "event should not be empty") framework.Logf("Got event: %v", event) @@ -226,10 +226,10 @@ func toPtr(s string) *string { return &s } -func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) (bool, error) { +func eventOccurred(ctx context.Context, c clientset.Interface, namespace, eventSelector, msg string) (bool, error) { options := metav1.ListOptions{FieldSelector: eventSelector} - events, err := c.CoreV1().Events(namespace).List(context.TODO(), options) + events, err := c.CoreV1().Events(namespace).List(ctx, options) if err != nil { return false, fmt.Errorf("got error while getting events: %v", err) } diff --git a/test/e2e/windows/service.go b/test/e2e/windows/service.go index 21bfea51b33..ecf944206a9 100644 --- a/test/e2e/windows/service.go +++ b/test/e2e/windows/service.go @@ -51,11 +51,11 @@ var _ = SIGDescribe("Services", func() { ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) - nodeIP, err := e2enode.PickIP(jig.Client) + nodeIP, err := e2enode.PickIP(ctx, jig.Client) framework.ExpectNoError(err) ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns) - svc, err := jig.CreateTCPService(func(svc *v1.Service) { + svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort }) framework.ExpectNoError(err) @@ -69,20 +69,20 @@ var _ = SIGDescribe("Services", func() { "kubernetes.io/os": "windows", } } - _, err = jig.Run(windowsNodeSelectorTweak) + _, err = jig.Run(ctx, windowsNodeSelectorTweak) framework.ExpectNoError(err) //using hybrid_network methods ginkgo.By("creating Windows testing Pod") testPod := createTestPod(f, windowsBusyBoximage, windowsOS) - testPod = e2epod.NewPodClient(f).CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod) ginkgo.By("verifying that pod has the correct nodeSelector") // Admission controllers may sometimes do the wrong thing framework.ExpectEqual(testPod.Spec.NodeSelector["kubernetes.io/os"], "windows") ginkgo.By(fmt.Sprintf("checking connectivity Pod to curl http://%s:%d", nodeIP, nodePort)) - assertConsistentConnectivity(f, testPod.ObjectMeta.Name, windowsOS, windowsCheck(fmt.Sprintf("http://%s", net.JoinHostPort(nodeIP, strconv.Itoa(nodePort))))) + assertConsistentConnectivity(ctx, f, testPod.ObjectMeta.Name, windowsOS, 
windowsCheck(fmt.Sprintf("http://%s", net.JoinHostPort(nodeIP, strconv.Itoa(nodePort))))) }) diff --git a/test/e2e/windows/volumes.go b/test/e2e/windows/volumes.go index 48e403d6506..f8e3abcc0ec 100644 --- a/test/e2e/windows/volumes.go +++ b/test/e2e/windows/volumes.go @@ -69,26 +69,26 @@ var _ = SIGDescribe("[Feature:Windows] Windows volume mounts ", func() { ginkgo.It("container should have readOnly permissions on emptyDir", func(ctx context.Context) { ginkgo.By("creating a container with readOnly permissions on emptyDir volume") - doReadOnlyTest(f, emptyDirSource, emptyDirVolumePath) + doReadOnlyTest(ctx, f, emptyDirSource, emptyDirVolumePath) ginkgo.By("creating two containers, one with readOnly permissions the other with read-write permissions on emptyDir volume") - doReadWriteReadOnlyTest(f, emptyDirSource, emptyDirVolumePath) + doReadWriteReadOnlyTest(ctx, f, emptyDirSource, emptyDirVolumePath) }) ginkgo.It("container should have readOnly permissions on hostMapPath", func(ctx context.Context) { ginkgo.By("creating a container with readOnly permissions on hostMap volume") - doReadOnlyTest(f, hostMapSource, hostMapPath) + doReadOnlyTest(ctx, f, hostMapSource, hostMapPath) ginkgo.By("creating two containers, one with readOnly permissions the other with read-write permissions on hostMap volume") - doReadWriteReadOnlyTest(f, hostMapSource, hostMapPath) + doReadWriteReadOnlyTest(ctx, f, hostMapSource, hostMapPath) }) }) }) -func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath string) { +func doReadOnlyTest(ctx context.Context, f *framework.Framework, source v1.VolumeSource, volumePath string) { var ( filePath = volumePath + "\\test-file.txt" podName = "pod-" + string(uuid.NewUUID()) @@ -98,7 +98,7 @@ func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath s "kubernetes.io/os": "windows", } - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("verifying that pod has the correct nodeSelector") framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows") @@ -109,7 +109,7 @@ func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath s framework.ExpectEqual(stderr, "Access is denied.") } -func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath string) { +func doReadWriteReadOnlyTest(ctx context.Context, f *framework.Framework, source v1.VolumeSource, volumePath string) { var ( filePath = volumePath + "\\test-file" + string(uuid.NewUUID()) podName = "pod-" + string(uuid.NewUUID()) @@ -132,7 +132,7 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol } pod.Spec.Containers = append(pod.Spec.Containers, rwcontainer) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("verifying that pod has the correct nodeSelector") framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows") diff --git a/test/e2e_kubeadm/bootstrap_token_test.go b/test/e2e_kubeadm/bootstrap_token_test.go index dc5e577afe9..ae2137128fd 100644 --- a/test/e2e_kubeadm/bootstrap_token_test.go +++ b/test/e2e_kubeadm/bootstrap_token_test.go @@ -54,7 +54,7 @@ var _ = Describe("bootstrap token", func() { ginkgo.It("should exist and be properly configured", func(ctx context.Context) { secrets, err := f.ClientSet.CoreV1(). Secrets(kubeSystemNamespace). 
- List(context.TODO(), metav1.ListOptions{}) + List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "error reading Secrets") tokenNum := 0 diff --git a/test/e2e_kubeadm/controlplane_nodes_test.go b/test/e2e_kubeadm/controlplane_nodes_test.go index bf087aa2ba1..8eec9a12bbd 100644 --- a/test/e2e_kubeadm/controlplane_nodes_test.go +++ b/test/e2e_kubeadm/controlplane_nodes_test.go @@ -51,22 +51,22 @@ var _ = Describe("control-plane node", func() { // in case you can skip this test with SKIP=multi-node ginkgo.It("should be labelled and tainted [multi-node]", func(ctx context.Context) { // get all control-plane nodes (and this implicitly checks that node are properly labeled) - controlPlanes := getControlPlaneNodes(f.ClientSet) + controlPlanes := getControlPlaneNodes(ctx, f.ClientSet) // checks if there is at least one control-plane node gomega.Expect(controlPlanes.Items).NotTo(gomega.BeEmpty(), "at least one node with label %s should exist. if you are running test on a single-node cluster, you can skip this test with SKIP=multi-node", controlPlaneLabel) // checks that the control-plane nodes have the expected taints for _, cp := range controlPlanes.Items { - e2enode.ExpectNodeHasTaint(f.ClientSet, cp.GetName(), &corev1.Taint{Key: controlPlaneLabel, Effect: corev1.TaintEffectNoSchedule}) + e2enode.ExpectNodeHasTaint(ctx, f.ClientSet, cp.GetName(), &corev1.Taint{Key: controlPlaneLabel, Effect: corev1.TaintEffectNoSchedule}) } }) }) -func getControlPlaneNodes(c clientset.Interface) *corev1.NodeList { +func getControlPlaneNodes(ctx context.Context, c clientset.Interface) *corev1.NodeList { selector := labels.Set{controlPlaneLabel: ""}.AsSelector() cpNodes, err := c.CoreV1().Nodes(). - List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err, "error reading control-plane nodes") return cpNodes } diff --git a/test/e2e_kubeadm/networking_test.go b/test/e2e_kubeadm/networking_test.go index 5acdfa545d4..a48399441a0 100644 --- a/test/e2e_kubeadm/networking_test.go +++ b/test/e2e_kubeadm/networking_test.go @@ -89,7 +89,7 @@ var _ = Describe("networking [setup-networking]", func() { netCC := cc["networking"].(map[interface{}]interface{}) if ps, ok := netCC["podSubnet"]; ok { // Check that the pod CIDR allocated to the node(s) is within the kubeadm-config podCIDR. - nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "error listing nodes") for _, node := range nodes.Items { if !subnetWithinSubnet(ps.(string), node.Spec.PodCIDR) { @@ -114,7 +114,7 @@ var _ = Describe("networking [setup-networking]", func() { if ss, ok := netCC["serviceSubnet"]; ok { // Get the kubernetes service in the default namespace. // Check that service CIDR allocated is within the serviceSubnet range. 
- svc, err := f.ClientSet.CoreV1().Services("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services("default").Get(ctx, "kubernetes", metav1.GetOptions{}) framework.ExpectNoError(err, "error getting Service %q from namespace %q", "kubernetes", "default") if !ipWithinSubnet(ss.(string), svc.Spec.ClusterIP) { framework.Failf("failed due to service(%v) cluster-IP %v not inside configured service subnet: %s", svc.Name, svc.Spec.ClusterIP, ss) @@ -137,7 +137,7 @@ var _ = Describe("networking [setup-networking]", func() { if _, ok := cc["networking"]; ok { netCC := cc["networking"].(map[interface{}]interface{}) if ps, ok := netCC["podSubnet"]; ok { - nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "error listing nodes") // Check that the pod CIDRs allocated to the node(s) are within the kubeadm-config podCIDR. var found bool diff --git a/test/e2e_kubeadm/nodes_test.go b/test/e2e_kubeadm/nodes_test.go index 93cc062b03c..736c453a1b6 100644 --- a/test/e2e_kubeadm/nodes_test.go +++ b/test/e2e_kubeadm/nodes_test.go @@ -49,7 +49,7 @@ var _ = Describe("nodes", func() { ginkgo.It("should have CRI annotation", func(ctx context.Context) { nodes, err := f.ClientSet.CoreV1().Nodes(). - List(context.TODO(), metav1.ListOptions{}) + List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "error reading nodes") // Checks that the nodes have the CRI socket annotation diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 1b7e261be17..66e971c6058 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -58,11 +58,11 @@ var _ = SIGDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should reject an unloaded profile", func(ctx context.Context) { - status := runAppArmorTest(f, false, v1.AppArmorBetaProfileNamePrefix+"non-existent-profile") + status := runAppArmorTest(ctx, f, false, v1.AppArmorBetaProfileNamePrefix+"non-existent-profile") gomega.Expect(status.ContainerStatuses[0].State.Waiting.Message).To(gomega.ContainSubstring("apparmor")) }) ginkgo.It("should enforce a profile blocking writes", func(ctx context.Context) { - status := runAppArmorTest(f, true, v1.AppArmorBetaProfileNamePrefix+apparmorProfilePrefix+"deny-write") + status := runAppArmorTest(ctx, f, true, v1.AppArmorBetaProfileNamePrefix+apparmorProfilePrefix+"deny-write") if len(status.ContainerStatuses) == 0 { framework.Failf("Unexpected pod status: %s", spew.Sdump(status)) return @@ -73,7 +73,7 @@ var _ = SIGDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() }) ginkgo.It("should enforce a permissive profile", func(ctx context.Context) { - status := runAppArmorTest(f, true, v1.AppArmorBetaProfileNamePrefix+apparmorProfilePrefix+"audit-write") + status := runAppArmorTest(ctx, f, true, v1.AppArmorBetaProfileNamePrefix+apparmorProfilePrefix+"audit-write") if len(status.ContainerStatuses) == 0 { framework.Failf("Unexpected pod status: %s", spew.Sdump(status)) return @@ -89,7 +89,7 @@ var _ = SIGDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should reject a pod with an AppArmor profile", func(ctx context.Context) { - status := runAppArmorTest(f, false, v1.AppArmorBetaProfileRuntimeDefault) + 
status := runAppArmorTest(ctx, f, false, v1.AppArmorBetaProfileRuntimeDefault) expectSoftRejection(status) }) }) @@ -149,11 +149,11 @@ func loadTestProfiles() error { return nil } -func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus { - pod := createPodWithAppArmor(f, profile) +func runAppArmorTest(ctx context.Context, f *framework.Framework, shouldRun bool, profile string) v1.PodStatus { + pod := createPodWithAppArmor(ctx, f, profile) if shouldRun { // The pod needs to start before it stops, so wait for the longer start timeout. - framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace( + framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) } else { // Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor". @@ -161,11 +161,11 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. w := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return e2epod.NewPodClient(f).List(context.TODO(), options) + return e2epod.NewPodClient(f).List(ctx, options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return e2epod.NewPodClient(f).Watch(context.TODO(), options) + return e2epod.NewPodClient(f).Watch(ctx, options) }, } preconditionFunc := func(store cache.Store) (bool, error) { @@ -181,7 +181,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. return false, nil } - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout) + ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout) defer cancel() _, err := watchtools.UntilWithSync(ctx, w, &v1.Pod{}, preconditionFunc, func(e watch.Event) (bool, error) { switch e.Type { @@ -202,12 +202,12 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. }) framework.ExpectNoError(err) } - p, err := e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + p, err := e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return p.Status } -func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod { +func createPodWithAppArmor(ctx context.Context, f *framework.Framework, profile string) *v1.Pod { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)), @@ -224,7 +224,7 @@ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod { RestartPolicy: v1.RestartPolicyNever, }, } - return e2epod.NewPodClient(f).Create(pod) + return e2epod.NewPodClient(f).Create(ctx, pod) } func expectSoftRejection(status v1.PodStatus) { diff --git a/test/e2e_node/checkpoint_container.go b/test/e2e_node/checkpoint_container.go index baf21473486..a409a0c4ca9 100644 --- a/test/e2e_node/checkpoint_container.go +++ b/test/e2e_node/checkpoint_container.go @@ -42,7 +42,7 @@ const ( ) // proxyPostRequest performs a post on a node proxy endpoint given the nodename and rest client. 
-func proxyPostRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) { +func proxyPostRequest(ctx context.Context, c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) { // proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165 var result restclient.Result finished := make(chan struct{}, 1) @@ -52,13 +52,15 @@ func proxyPostRequest(c clientset.Interface, node, endpoint string, port int) (r SubResource("proxy"). Name(fmt.Sprintf("%v:%v", node, port)). Suffix(endpoint). - Do(context.TODO()) + Do(ctx) finished <- struct{}{} }() select { case <-finished: return result, nil + case <-ctx.Done(): + return restclient.Result{}, nil case <-time.After(proxyTimeout): return restclient.Result{}, nil } @@ -70,7 +72,7 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu ginkgo.It("will checkpoint a container out of a pod", func(ctx context.Context) { ginkgo.By("creating a target pod") podClient := e2epod.NewPodClient(f) - pod := podClient.CreateSync(&v1.Pod{ + pod := podClient.CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "checkpoint-container-pod"}, Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -85,7 +87,7 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu }) p, err := podClient.Get( - context.TODO(), + ctx, pod.Name, metav1.GetOptions{}, ) @@ -105,6 +107,7 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu pod.Spec.NodeName, ) result, err := proxyPostRequest( + ctx, f.ClientSet, pod.Spec.NodeName, fmt.Sprintf( diff --git a/test/e2e_node/container_log_rotation_test.go b/test/e2e_node/container_log_rotation_test.go index aadd300d7d9..48384be8218 100644 --- a/test/e2e_node/container_log_rotation_test.go +++ b/test/e2e_node/container_log_rotation_test.go @@ -45,13 +45,13 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() f := framework.NewDefaultFramework("container-log-rotation-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("when a container generates a lot of log", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.ContainerLogMaxFiles = testContainerLogMaxFiles initialConfig.ContainerLogMaxSize = testContainerLogMaxSize }) var logRotationPod *v1.Pod - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("create log container") pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -73,7 +73,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() }, }, } - logRotationPod = e2epod.NewPodClient(f).CreateSync(pod) + logRotationPod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, time.Minute) }) @@ -88,7 +88,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() framework.ExpectNoError(err) logPath := resp.GetStatus().GetLogPath() ginkgo.By("wait for container log being rotated to max file limit") - gomega.Eventually(func() (int, error) { + gomega.Eventually(ctx, func() (int, error) { logs, err := kubelogs.GetAllLogs(logPath) if err != nil { return 0, err @@ -96,7 +96,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() 
return len(logs), nil }, rotationEventuallyTimeout, rotationPollInterval).Should(gomega.Equal(testContainerLogMaxFiles), "should eventually rotate to max file limit") ginkgo.By("make sure container log number won't exceed max file limit") - gomega.Consistently(func() (int, error) { + gomega.Consistently(ctx, func() (int, error) { logs, err := kubelogs.GetAllLogs(logPath) if err != nil { return 0, err diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index 7776f997ce5..69014457807 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -86,7 +86,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) framework.ExpectNoError(err, "failed to get list of container runtime pids") for _, pid := range runtimePids { - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { return validateOOMScoreAdjSetting(pid, -999) }, 5*time.Minute, 30*time.Second).Should(gomega.BeNil()) } @@ -95,7 +95,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { kubeletPids, err := getPidsForProcess(kubeletProcessName, "") framework.ExpectNoError(err, "failed to get list of kubelet pids") framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids)) - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { return validateOOMScoreAdjSetting(kubeletPids[0], -999) }, 5*time.Minute, 30*time.Second).Should(gomega.BeNil()) }) @@ -110,7 +110,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { podClient := e2epod.NewPodClient(f) podName := "besteffort" + string(uuid.NewUUID()) - podClient.Create(&v1.Pod{ + podClient.Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -126,7 +126,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { var pausePids []int ginkgo.By("checking infra container's oom-score-adj") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { pausePids, err = getPidsForProcess("pause", "") if err != nil { return fmt.Errorf("failed to get list of pause pids: %v", err) @@ -144,7 +144,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) var shPids []int ginkgo.By("checking besteffort container's oom-score-adj") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { shPids, err = getPidsForProcess("agnhost", "") if err != nil { return fmt.Errorf("failed to get list of serve hostname process pids: %v", err) @@ -177,7 +177,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { ginkgo.It("guaranteed container's oom-score-adj should be -998", func(ctx context.Context) { podClient := e2epod.NewPodClient(f) podName := "guaranteed" + string(uuid.NewUUID()) - podClient.Create(&v1.Pod{ + podClient.Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -200,7 +200,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { ngPids []int err error ) - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { ngPids, err = getPidsForProcess("nginx", "") if err != nil { return fmt.Errorf("failed to get list of nginx process pids: %v", err) @@ -218,7 +218,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { ginkgo.It("burstable container's oom-score-adj should be 
between [2, 1000)", func(ctx context.Context) { podClient := e2epod.NewPodClient(f) podName := "burstable" + string(uuid.NewUUID()) - podClient.Create(&v1.Pod{ + podClient.Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -242,7 +242,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { wsPids []int err error ) - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { wsPids, err = getPidsForProcess("agnhost", "") if err != nil { return fmt.Errorf("failed to get list of test-webserver process pids: %v", err) diff --git a/test/e2e_node/cpu_manager_metrics_test.go b/test/e2e_node/cpu_manager_metrics_test.go index 7f48e363384..c90ecc14649 100644 --- a/test/e2e_node/cpu_manager_metrics_test.go +++ b/test/e2e_node/cpu_manager_metrics_test.go @@ -48,15 +48,15 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { var testPod *v1.Pod var smtLevel int - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { var err error if oldCfg == nil { - oldCfg, err = getCurrentKubeletConfig() + oldCfg, err = getCurrentKubeletConfig(ctx) framework.ExpectNoError(err) } fullCPUsOnlyOpt := fmt.Sprintf("option=%s", cpumanager.FullPCPUsOnlyOption) - _, cpuAlloc, _ := getLocalNodeCPUDetails(f) + _, cpuAlloc, _ := getLocalNodeCPUDetails(ctx, f) smtLevel = getSMTLevel() // strict SMT alignment is trivially verified and granted on non-SMT systems @@ -84,14 +84,14 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { options: cpuPolicyOptions, }, ) - updateKubeletConfig(f, newCfg, true) + updateKubeletConfig(ctx, f, newCfg, true) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if testPod != nil { - deletePodSyncByName(f, testPod.Name) + deletePodSyncByName(ctx, f, testPod.Name) } - updateKubeletConfig(f, oldCfg, true) + updateKubeletConfig(ctx, f, oldCfg, true) }) ginkgo.It("should report zero pinning counters after a fresh restart", func(ctx context.Context) { @@ -116,7 +116,7 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { ginkgo.It("should report pinning failures when the cpumanager allocation is known to fail", func(ctx context.Context) { ginkgo.By("Creating the test pod which will be rejected for SMTAlignmentError") - testPod = e2epod.NewPodClient(f).Create(makeGuaranteedCPUExclusiveSleeperPod("smt-align-err", 1)) + testPod = e2epod.NewPodClient(f).Create(ctx, makeGuaranteedCPUExclusiveSleeperPod("smt-align-err", 1)) // we updated the kubelet config in BeforeEach, so we can assume we start fresh. // being [Serial], we can also assume noone else but us is running pods. @@ -139,7 +139,7 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { ginkgo.It("should not report any pinning failures when the cpumanager allocation is expected to succeed", func(ctx context.Context) { ginkgo.By("Creating the test pod") - testPod = e2epod.NewPodClient(f).Create(makeGuaranteedCPUExclusiveSleeperPod("smt-align-ok", smtLevel)) + testPod = e2epod.NewPodClient(f).Create(ctx, makeGuaranteedCPUExclusiveSleeperPod("smt-align-ok", smtLevel)) // we updated the kubelet config in BeforeEach, so we can assume we start fresh. // being [Serial], we can also assume noone else but us is running pods. 
@@ -162,10 +162,10 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { }) }) -func getCPUManagerMetrics() (e2emetrics.KubeletMetrics, error) { +func getCPUManagerMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) { // we are running out of good names, so we need to be unnecessarily specific to avoid clashes ginkgo.By("getting CPU Manager metrics from the metrics API") - return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") + return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, framework.TestContext.NodeName+":10255", "/metrics") } func makeGuaranteedCPUExclusiveSleeperPod(name string, cpus int) *v1.Pod { diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 8bf744ec6da..2ca23684834 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -83,24 +83,24 @@ func makeCPUManagerPod(podName string, ctnAttributes []ctnAttribute) *v1.Pod { } } -func deletePodSyncByName(f *framework.Framework, podName string) { +func deletePodSyncByName(ctx context.Context, f *framework.Framework, podName string) { gp := int64(0) delOpts := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } - e2epod.NewPodClient(f).DeleteSync(podName, delOpts, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(ctx, podName, delOpts, e2epod.DefaultPodDeletionTimeout) } -func deletePods(f *framework.Framework, podNames []string) { +func deletePods(ctx context.Context, f *framework.Framework, podNames []string) { for _, podName := range podNames { - deletePodSyncByName(f, podName) + deletePodSyncByName(ctx, f, podName) } } -func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVal int64, cpuResVal int64) { - localNodeCap := getLocalNode(f).Status.Capacity +func getLocalNodeCPUDetails(ctx context.Context, f *framework.Framework) (cpuCapVal int64, cpuAllocVal int64, cpuResVal int64) { + localNodeCap := getLocalNode(ctx, f).Status.Capacity cpuCap := localNodeCap[v1.ResourceCPU] - localNodeAlloc := getLocalNode(f).Status.Allocatable + localNodeAlloc := getLocalNode(ctx, f).Status.Allocatable cpuAlloc := localNodeAlloc[v1.ResourceCPU] cpuRes := cpuCap.DeepCopy() cpuRes.Sub(cpuAlloc) @@ -111,11 +111,11 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa return cpuCap.Value(), cpuCap.Value() - cpuRes.Value(), cpuRes.Value() } -func waitForContainerRemoval(containerName, podName, podNS string) { +func waitForContainerRemoval(ctx context.Context, containerName, podName, podNS string) { rs, _, err := getCRIClient() framework.ExpectNoError(err) - gomega.Eventually(func() bool { - containers, err := rs.ListContainers(context.Background(), &runtimeapi.ContainerFilter{ + gomega.Eventually(ctx, func(ctx context.Context) bool { + containers, err := rs.ListContainers(ctx, &runtimeapi.ContainerFilter{ LabelSelector: map[string]string{ types.KubernetesPodNameLabel: podName, types.KubernetesPodNamespaceLabel: podNS, @@ -216,7 +216,7 @@ func configureCPUManagerInKubelet(oldCfg *kubeletconfig.KubeletConfiguration, ku return newCfg } -func runGuPodTest(f *framework.Framework, cpuCount int) { +func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int) { var pod *v1.Pod ctnAttrs := []ctnAttribute{ @@ -227,14 +227,14 @@ func runGuPodTest(f *framework.Framework, cpuCount int) { }, } pod = makeCPUManagerPod("gu-pod", ctnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = 
e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("checking if the expected cpuset was assigned") // any full CPU is fine - we cannot nor we should predict which one, though for _, cnt := range pod.Spec.Containers { ginkgo.By(fmt.Sprintf("validating the container %s on Gu pod %s", cnt.Name, pod.Name)) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", cnt.Name, pod.Name) framework.Logf("got pod logs: %v", logs) @@ -245,11 +245,11 @@ func runGuPodTest(f *framework.Framework, cpuCount int) { } ginkgo.By("by deleting the pods and waiting for container removal") - deletePods(f, []string{pod.Name}) - waitForAllContainerRemoval(pod.Name, pod.Namespace) + deletePods(ctx, f, []string{pod.Name}) + waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace) } -func runNonGuPodTest(f *framework.Framework, cpuCap int64) { +func runNonGuPodTest(ctx context.Context, f *framework.Framework, cpuCap int64) { var ctnAttrs []ctnAttribute var err error var pod *v1.Pod @@ -263,7 +263,7 @@ func runNonGuPodTest(f *framework.Framework, cpuCap int64) { }, } pod = makeCPUManagerPod("non-gu-pod", ctnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("checking if the expected cpuset was assigned") expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1) @@ -271,16 +271,16 @@ func runNonGuPodTest(f *framework.Framework, cpuCap int64) { if cpuCap == 1 { expAllowedCPUsListRegex = "^0\n$" } - err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[0].Name, pod.Name) ginkgo.By("by deleting the pods and waiting for container removal") - deletePods(f, []string{pod.Name}) - waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) + deletePods(ctx, f, []string{pod.Name}) + waitForContainerRemoval(ctx, pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) } -func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64) { +func runMultipleGuNonGuPods(ctx context.Context, f *framework.Framework, cpuCap int64, cpuAlloc int64) { var cpuListString, expAllowedCPUsListRegex string var cpuList []int var cpu1 int @@ -297,7 +297,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64 }, } pod1 = makeCPUManagerPod("gu-pod", ctnAttrs) - pod1 = e2epod.NewPodClient(f).CreateSync(pod1) + pod1 = e2epod.NewPodClient(f).CreateSync(ctx, pod1) ctnAttrs = []ctnAttribute{ { @@ -307,7 +307,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64 }, } pod2 = makeCPUManagerPod("non-gu-pod", ctnAttrs) - pod2 = e2epod.NewPodClient(f).CreateSync(pod2) + pod2 = e2epod.NewPodClient(f).CreateSync(ctx, pod2) ginkgo.By("checking if the expected cpuset was assigned") cpu1 = 1 @@ -321,7 +321,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64 } } expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1) - err = e2epod.NewPodClient(f).MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, 
pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod1.Spec.Containers[0].Name, pod1.Name) @@ -331,16 +331,16 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64 cpuListString = fmt.Sprintf("%s", cset.Difference(cpuset.NewCPUSet(cpu1))) } expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString) - err = e2epod.NewPodClient(f).MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod2.Spec.Containers[0].Name, pod2.Name) ginkgo.By("by deleting the pods and waiting for container removal") - deletePods(f, []string{pod1.Name, pod2.Name}) - waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace) - waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace) + deletePods(ctx, f, []string{pod1.Name, pod2.Name}) + waitForContainerRemoval(ctx, pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace) + waitForContainerRemoval(ctx, pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace) } -func runMultipleCPUGuPod(f *framework.Framework) { +func runMultipleCPUGuPod(ctx context.Context, f *framework.Framework) { var cpuListString, expAllowedCPUsListRegex string var cpuList []int var cset cpuset.CPUSet @@ -356,7 +356,7 @@ func runMultipleCPUGuPod(f *framework.Framework) { }, } pod = makeCPUManagerPod("gu-pod", ctnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("checking if the expected cpuset was assigned") cpuListString = "1-2" @@ -378,16 +378,16 @@ func runMultipleCPUGuPod(f *framework.Framework) { } } expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString) - err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[0].Name, pod.Name) ginkgo.By("by deleting the pods and waiting for container removal") - deletePods(f, []string{pod.Name}) - waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) + deletePods(ctx, f, []string{pod.Name}) + waitForContainerRemoval(ctx, pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) } -func runMultipleCPUContainersGuPod(f *framework.Framework) { +func runMultipleCPUContainersGuPod(ctx context.Context, f *framework.Framework) { var expAllowedCPUsListRegex string var cpuList []int var cpu1, cpu2 int @@ -407,7 +407,7 @@ func runMultipleCPUContainersGuPod(f *framework.Framework) { }, } pod = makeCPUManagerPod("gu-pod", ctnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("checking if the expected cpuset was assigned") cpu1, cpu2 = 1, 2 @@ -429,21 +429,21 @@ func runMultipleCPUContainersGuPod(f *framework.Framework) { } } expAllowedCPUsListRegex = fmt.Sprintf("^%d|%d\n$", cpu1, cpu2) - err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) 
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[0].Name, pod.Name) - err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[1].Name, pod.Name) ginkgo.By("by deleting the pods and waiting for container removal") - deletePods(f, []string{pod.Name}) - waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) - waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace) + deletePods(ctx, f, []string{pod.Name}) + waitForContainerRemoval(ctx, pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) + waitForContainerRemoval(ctx, pod.Spec.Containers[1].Name, pod.Name, pod.Namespace) } -func runMultipleGuPods(f *framework.Framework) { +func runMultipleGuPods(ctx context.Context, f *framework.Framework) { var expAllowedCPUsListRegex string var cpuList []int var cpu1, cpu2 int @@ -459,7 +459,7 @@ func runMultipleGuPods(f *framework.Framework) { }, } pod1 = makeCPUManagerPod("gu-pod1", ctnAttrs) - pod1 = e2epod.NewPodClient(f).CreateSync(pod1) + pod1 = e2epod.NewPodClient(f).CreateSync(ctx, pod1) ctnAttrs = []ctnAttribute{ { @@ -469,7 +469,7 @@ func runMultipleGuPods(f *framework.Framework) { }, } pod2 = makeCPUManagerPod("gu-pod2", ctnAttrs) - pod2 = e2epod.NewPodClient(f).CreateSync(pod2) + pod2 = e2epod.NewPodClient(f).CreateSync(ctx, pod2) ginkgo.By("checking if the expected cpuset was assigned") cpu1, cpu2 = 1, 2 @@ -491,18 +491,18 @@ func runMultipleGuPods(f *framework.Framework) { } } expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1) - err = e2epod.NewPodClient(f).MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod1.Spec.Containers[0].Name, pod1.Name) expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu2) - err = e2epod.NewPodClient(f).MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod2.Spec.Containers[0].Name, pod2.Name) ginkgo.By("by deleting the pods and waiting for container removal") - deletePods(f, []string{pod1.Name, pod2.Name}) - waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace) - waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace) + deletePods(ctx, f, []string{pod1.Name, pod2.Name}) + waitForContainerRemoval(ctx, pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace) + waitForContainerRemoval(ctx, pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace) } func runCPUManagerTests(f *framework.Framework) { @@ -515,16 +515,16 @@ func runCPUManagerTests(f *framework.Framework) { var ctnAttrs []ctnAttribute var pod *v1.Pod - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { var err error if oldCfg == nil { - oldCfg, err = getCurrentKubeletConfig() + oldCfg, err = getCurrentKubeletConfig(ctx) framework.ExpectNoError(err) } }) ginkgo.It("should 
assign CPUs as expected based on the Pod spec", func(ctx context.Context) { - cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f) + cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f) // Skip CPU Manager tests altogether if the CPU capacity < 2. if cpuCap < 2 { @@ -536,16 +536,16 @@ func runCPUManagerTests(f *framework.Framework) { policyName: string(cpumanager.PolicyStatic), reservedSystemCPUs: cpuset.CPUSet{}, }) - updateKubeletConfig(f, newCfg, true) + updateKubeletConfig(ctx, f, newCfg, true) ginkgo.By("running a non-Gu pod") - runNonGuPodTest(f, cpuCap) + runNonGuPodTest(ctx, f, cpuCap) ginkgo.By("running a Gu pod") - runGuPodTest(f, 1) + runGuPodTest(ctx, f, 1) ginkgo.By("running multiple Gu and non-Gu pods") - runMultipleGuNonGuPods(f, cpuCap, cpuAlloc) + runMultipleGuNonGuPods(ctx, f, cpuCap, cpuAlloc) // Skip rest of the tests if CPU capacity < 3. if cpuCap < 3 { @@ -553,13 +553,13 @@ func runCPUManagerTests(f *framework.Framework) { } ginkgo.By("running a Gu pod requesting multiple CPUs") - runMultipleCPUGuPod(f) + runMultipleCPUGuPod(ctx, f) ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs") - runMultipleCPUContainersGuPod(f) + runMultipleCPUContainersGuPod(ctx, f) ginkgo.By("running multiple Gu pods") - runMultipleGuPods(f) + runMultipleGuPods(ctx, f) ginkgo.By("test for automatically remove inactive pods from cpumanager state file.") // First running a Gu Pod, @@ -577,7 +577,7 @@ func runCPUManagerTests(f *framework.Framework) { }, } pod = makeCPUManagerPod("gu-pod-testremove", ctnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) ginkgo.By("checking if the expected cpuset was assigned") cpu1 = 1 @@ -591,20 +591,20 @@ func runCPUManagerTests(f *framework.Framework) { } } expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1) - err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[0].Name, pod.Name) - deletePodSyncByName(f, pod.Name) + deletePodSyncByName(ctx, f, pod.Name) // we need to wait for all containers to really be gone so cpumanager reconcile loop will not rewrite the cpu_manager_state. 
// this is in turn needed because we will have an unavoidable (in the current framework) race with the // reconcile loop which will make our attempt to delete the state file and to restore the old config go haywire - waitForAllContainerRemoval(pod.Name, pod.Namespace) + waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace) }) ginkgo.It("should assign CPUs as expected with enhanced policy based on strict SMT alignment", func(ctx context.Context) { fullCPUsOnlyOpt := fmt.Sprintf("option=%s", cpumanager.FullPCPUsOnlyOption) - _, cpuAlloc, _ = getLocalNodeCPUDetails(f) + _, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f) smtLevel := getSMTLevel() // strict SMT alignment is trivially verified and granted on non-SMT systems @@ -632,19 +632,19 @@ func runCPUManagerTests(f *framework.Framework) { options: cpuPolicyOptions, }, ) - updateKubeletConfig(f, newCfg, true) + updateKubeletConfig(ctx, f, newCfg, true) // the order between negative and positive doesn't really matter - runSMTAlignmentNegativeTests(f) - runSMTAlignmentPositiveTests(f, smtLevel) + runSMTAlignmentNegativeTests(ctx, f) + runSMTAlignmentPositiveTests(ctx, f, smtLevel) }) - ginkgo.AfterEach(func() { - updateKubeletConfig(f, oldCfg, true) + ginkgo.AfterEach(func(ctx context.Context) { + updateKubeletConfig(ctx, f, oldCfg, true) }) } -func runSMTAlignmentNegativeTests(f *framework.Framework) { +func runSMTAlignmentNegativeTests(ctx context.Context, f *framework.Framework) { // negative test: try to run a container whose requests aren't a multiple of SMT level, expect a rejection ctnAttrs := []ctnAttribute{ { @@ -655,16 +655,16 @@ func runSMTAlignmentNegativeTests(f *framework.Framework) { } pod := makeCPUManagerPod("gu-pod", ctnAttrs) // CreateSync would wait for pod to become Ready - which will never happen if production code works as intended! - pod = e2epod.NewPodClient(f).Create(pod) + pod = e2epod.NewPodClient(f).Create(ctx, pod) - err := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { + err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase != v1.PodPending { return true, nil } return false, nil }) framework.ExpectNoError(err) - pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if pod.Status.Phase != v1.PodFailed { @@ -674,14 +674,14 @@ func runSMTAlignmentNegativeTests(f *framework.Framework) { framework.Failf("pod %s failed for wrong reason: %q", pod.Name, pod.Status.Reason) } - deletePodSyncByName(f, pod.Name) + deletePodSyncByName(ctx, f, pod.Name) // we need to wait for all containers to really be gone so cpumanager reconcile loop will not rewrite the cpu_manager_state. // this is in turn needed because we will have an unavoidable (in the current framework) race with th // reconcile loop which will make our attempt to delete the state file and to restore the old config go haywire - waitForAllContainerRemoval(pod.Name, pod.Namespace) + waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace) } -func runSMTAlignmentPositiveTests(f *framework.Framework, smtLevel int) { +func runSMTAlignmentPositiveTests(ctx context.Context, f *framework.Framework, smtLevel int) { // positive test: try to run a container whose requests are a multiple of SMT level, check allocated cores // 1. are core siblings // 2. 
take a full core @@ -695,12 +695,12 @@ func runSMTAlignmentPositiveTests(f *framework.Framework, smtLevel int) { }, } pod := makeCPUManagerPod("gu-pod", ctnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) for _, cnt := range pod.Spec.Containers { ginkgo.By(fmt.Sprintf("validating the container %s on Gu pod %s", cnt.Name, pod.Name)) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", cnt.Name, pod.Name) framework.Logf("got pod logs: %v", logs) @@ -710,11 +710,11 @@ func runSMTAlignmentPositiveTests(f *framework.Framework, smtLevel int) { validateSMTAlignment(cpus, smtLevel, pod, &cnt) } - deletePodSyncByName(f, pod.Name) + deletePodSyncByName(ctx, f, pod.Name) // we need to wait for all containers to really be gone so cpumanager reconcile loop will not rewrite the cpu_manager_state. // this is in turn needed because we will have an unavoidable (in the current framework) race with th // reconcile loop which will make our attempt to delete the state file and to restore the old config go haywire - waitForAllContainerRemoval(pod.Name, pod.Namespace) + waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace) } func validateSMTAlignment(cpus cpuset.CPUSet, smtLevel int, pod *v1.Pod, cnt *v1.Container) { diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index bc06438553d..fa700fdb084 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -47,7 +47,7 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod] ginkgo.Context("when we need to admit a critical pod", func() { ginkgo.It("[Flaky] should be able to create and delete a critical pod", func(ctx context.Context) { // because adminssion Priority enable, If the priority class is not found, the Pod is rejected. - node := getNodeName(f) + node := getNodeName(ctx, f) // Define test pods nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{ Requests: v1.ResourceList{ @@ -69,15 +69,15 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod] criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{ // request the entire resource capacity of the node, so that // admitting this pod requires the other pod to be preempted - Requests: getNodeCPUAndMemoryCapacity(f), + Requests: getNodeCPUAndMemoryCapacity(ctx, f), }, node) // Create pods, starting with non-critical so that the critical preempts the other pods. 
- e2epod.NewPodClient(f).CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed}) - e2epod.PodClientNS(f, kubeapi.NamespaceSystem).CreateSync(criticalPod) + e2epod.NewPodClient(f).CreateBatch(ctx, []*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed}) + e2epod.PodClientNS(f, kubeapi.NamespaceSystem).CreateSync(ctx, criticalPod) // Check that non-critical pods other than the besteffort have been evicted - updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, p := range updatedPodList.Items { if p.Name == nonCriticalBestEffort.Name { @@ -87,22 +87,22 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod] } } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { // Delete Pods - e2epod.NewPodClient(f).DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) - e2epod.NewPodClient(f).DeleteSync(burstablePodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) - e2epod.NewPodClient(f).DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) - e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(ctx, burstablePodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(ctx, bestEffortPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(ctx, criticalPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) // Log Events - logPodEvents(f) - logNodeEvents(f) + logPodEvents(ctx, f) + logNodeEvents(ctx, f) }) }) }) -func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func getNodeCPUAndMemoryCapacity(ctx context.Context, f *framework.Framework) v1.ResourceList { + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) // Assuming that there is only one node, because this is a node e2e test. framework.ExpectEqual(len(nodeList.Items), 1) @@ -113,8 +113,8 @@ func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList { } } -func getNodeName(f *framework.Framework) string { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func getNodeName(ctx context.Context, f *framework.Framework) string { + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) // Assuming that there is only one node, because this is a node e2e test. 
framework.ExpectEqual(len(nodeList.Items), 1) diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index d117562fe64..8361a297be7 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -68,9 +68,9 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { f := framework.NewDefaultFramework("density-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // Start a standalone cadvisor pod using 'createSync', the pod is running when it returns - e2epod.NewPodClient(f).CreateSync(getCadvisorPod()) + e2epod.NewPodClient(f).CreateSync(ctx, getCadvisorPod()) // Resource collector monitors fine-grain CPU/memory usage by a standalone Cadvisor with // 1s housingkeeping interval rc = NewResourceCollector(containerStatsPollingPeriod) @@ -109,13 +109,13 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { itArg.createMethod = "batch" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) - batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, false) + batchLag, e2eLags := runDensityBatchTest(ctx, f, rc, itArg, testInfo, false) ginkgo.By("Verifying latency") - logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true) + logAndVerifyLatency(ctx, batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true) ginkgo.By("Verifying resource") - logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true) + logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true) }) } }) @@ -167,13 +167,13 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { itArg.createMethod = "batch" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) - batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true) + batchLag, e2eLags := runDensityBatchTest(ctx, f, rc, itArg, testInfo, true) ginkgo.By("Verifying latency") - logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false) + logAndVerifyLatency(ctx, batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false) ginkgo.By("Verifying resource") - logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false) + logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false) }) } }) @@ -205,7 +205,7 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { // It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated. // Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance. // Note that it will cause higher resource usage. 
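The tempSetCurrentKubeletConfig hunk that follows (and the eviction_test.go hunks further down) widen the configuration-mutation callback to func(ctx context.Context, cfg *kubeletconfig.KubeletConfiguration), so helpers the callback calls can be cancelled together with the spec. A small sketch of the new callback shape, reusing only names that appear in this patch:

tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
	// eventuallyGetSummary(ctx) is the context-aware form shown in the
	// eviction_test.go hunks below; with the context it stops retrying as
	// soon as the spec is aborted.
	summary := eventuallyGetSummary(ctx)
	framework.Logf("inodes free before reconfiguring: %d", *summary.Node.Fs.InodesFree)
	initialConfig.KubeAPIQPS = 60 // the same kind of tweak the density test applies
})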
- tempSetCurrentKubeletConfig(f, func(cfg *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, cfg *kubeletconfig.KubeletConfiguration) { framework.Logf("Old QPS limit is: %d", cfg.KubeAPIQPS) // Set new API QPS limit cfg.KubeAPIQPS = int32(itArg.APIQPSLimit) @@ -213,13 +213,13 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "batch" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) - batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true) + batchLag, e2eLags := runDensityBatchTest(ctx, f, rc, itArg, testInfo, true) ginkgo.By("Verifying latency") - logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false) + logAndVerifyLatency(ctx, batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false) ginkgo.By("Verifying resource") - logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false) + logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false) }) }) } @@ -252,13 +252,13 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "sequence" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) - batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo) + batchlag, e2eLags := runDensitySeqTest(ctx, f, rc, itArg, testInfo) ginkgo.By("Verifying latency") - logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true) + logAndVerifyLatency(ctx, batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true) ginkgo.By("Verifying resource") - logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true) + logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true) }) } }) @@ -285,13 +285,13 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "sequence" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) - batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo) + batchlag, e2eLags := runDensitySeqTest(ctx, f, rc, itArg, testInfo) ginkgo.By("Verifying latency") - logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false) + logAndVerifyLatency(ctx, batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false) ginkgo.By("Verifying resource") - logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false) + logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false) }) } }) @@ -327,7 +327,7 @@ func (dt *densityTest) getTestName() string { } // runDensityBatchTest runs the density batch pod creation test -func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string, +func runDensityBatchTest(ctx context.Context, f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string, isLogTimeSeries bool) (time.Duration, []e2emetrics.PodLatencyData) { const ( podType = "density_test_pod" @@ -343,7 +343,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType) // the controller watches the change of pod status - controller := newInformerWatchPod(f, mutex, watchTimes, podType) + 
controller := newInformerWatchPod(ctx, f, mutex, watchTimes, podType) go controller.Run(stopCh) defer close(stopCh) @@ -357,11 +357,11 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg ginkgo.By("Creating a batch of pods") // It returns a map['pod name']'creation time' containing the creation timestamps - createTimes := createBatchPodWithRateControl(f, pods, testArg.interval) + createTimes := createBatchPodWithRateControl(ctx, f, pods, testArg.interval) ginkgo.By("Waiting for all Pods to be observed by the watch...") - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return len(watchTimes) == testArg.podsNr }, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue()) @@ -401,7 +401,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg batchLag := lastRunning.Time.Sub(firstCreate.Time) rc.Stop() - deletePodsSync(f, pods) + deletePodsSync(ctx, f, pods) // Log time series data. if isLogTimeSeries { @@ -410,13 +410,13 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg // Log throughput data. logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testInfo) - deletePodsSync(f, []*v1.Pod{getCadvisorPod()}) + deletePodsSync(ctx, f, []*v1.Pod{getCadvisorPod()}) return batchLag, e2eLags } // runDensitySeqTest runs the density sequential pod creation test -func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string) (time.Duration, []e2emetrics.PodLatencyData) { +func runDensitySeqTest(ctx context.Context, f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string) (time.Duration, []e2emetrics.PodLatencyData) { const ( podType = "density_test_pod" sleepBeforeCreatePods = 30 * time.Second @@ -427,43 +427,43 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de ginkgo.By("Creating a batch of background pods") // CreatBatch is synchronized, all pods are running when it returns - e2epod.NewPodClient(f).CreateBatch(bgPods) + e2epod.NewPodClient(f).CreateBatch(ctx, bgPods) time.Sleep(sleepBeforeCreatePods) rc.Start() // Create pods sequentially (back-to-back). e2eLags have been sorted. - batchlag, e2eLags := createBatchPodSequential(f, testPods, podType) + batchlag, e2eLags := createBatchPodSequential(ctx, f, testPods, podType) rc.Stop() - deletePodsSync(f, append(bgPods, testPods...)) + deletePodsSync(ctx, f, append(bgPods, testPods...)) // Log throughput data. logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo) - deletePodsSync(f, []*v1.Pod{getCadvisorPod()}) + deletePodsSync(ctx, f, []*v1.Pod{getCadvisorPod()}) return batchlag, e2eLags } // createBatchPodWithRateControl creates a batch of pods concurrently, uses one goroutine for each creation. 
// between creations there is an interval for throughput control -func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time { +func createBatchPodWithRateControl(ctx context.Context, f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time { createTimes := make(map[string]metav1.Time) for i := range pods { pod := pods[i] createTimes[pod.ObjectMeta.Name] = metav1.Now() - go e2epod.NewPodClient(f).Create(pod) + go e2epod.NewPodClient(f).Create(ctx, pod) time.Sleep(interval) } return createTimes } // getPodStartLatency gets prometheus metric 'pod start latency' from kubelet -func getPodStartLatency(node string) (e2emetrics.KubeletLatencyMetrics, error) { +func getPodStartLatency(ctx context.Context, node string) (e2emetrics.KubeletLatencyMetrics, error) { latencyMetrics := e2emetrics.KubeletLatencyMetrics{} - ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(node, "/metrics") + ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, node, "/metrics") framework.ExpectNoError(err, "Failed to get kubelet metrics without proxy in node %s", node) for _, samples := range ms { @@ -482,7 +482,7 @@ func getPodStartLatency(node string) (e2emetrics.KubeletLatencyMetrics, error) { } // newInformerWatchPod creates an informer to check whether all pods are running. -func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller { +func newInformerWatchPod(ctx context.Context, f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller { ns := f.Namespace.Name checkPodRunning := func(p *v1.Pod) { mutex.Lock() @@ -500,12 +500,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options) }, }, &v1.Pod{}, @@ -527,7 +527,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m } // createBatchPodSequential creates pods back-to-back in sequence. 
-func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod, podType string) (time.Duration, []e2emetrics.PodLatencyData) { +func createBatchPodSequential(ctx context.Context, f *framework.Framework, pods []*v1.Pod, podType string) (time.Duration, []e2emetrics.PodLatencyData) { var ( mutex = &sync.Mutex{} watchTimes = make(map[string]metav1.Time, 0) @@ -537,7 +537,7 @@ func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod, podType st init = true ) // the controller watches the change of pod status - controller := newInformerWatchPod(f, mutex, watchTimes, podType) + controller := newInformerWatchPod(ctx, f, mutex, watchTimes, podType) go controller.Run(stopCh) defer close(stopCh) @@ -547,8 +547,8 @@ func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod, podType st for _, pod := range pods { create := metav1.Now() createTimes[pod.Name] = create - p := e2epod.NewPodClient(f).Create(pod) - framework.ExpectNoError(wait.PollImmediate(2*time.Second, framework.PodStartTimeout, podWatchedRunning(watchTimes, p.Name))) + p := e2epod.NewPodClient(f).Create(ctx, pod) + framework.ExpectNoError(wait.PollImmediateWithContext(ctx, 2*time.Second, framework.PodStartTimeout, podWatchedRunning(watchTimes, p.Name))) e2eLags = append(e2eLags, e2emetrics.PodLatencyData{Name: pod.Name, Latency: watchTimes[pod.Name].Time.Sub(create.Time)}) } @@ -574,8 +574,8 @@ func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod, podType st } // podWatchedRunning verifies whether the pod becomes Running, as the watchTime was set by informer -func podWatchedRunning(watchTimes map[string]metav1.Time, podName string) wait.ConditionFunc { - return func() (done bool, err error) { +func podWatchedRunning(watchTimes map[string]metav1.Time, podName string) wait.ConditionWithContextFunc { + return func(ctx context.Context) (done bool, err error) { if _, found := watchTimes[podName]; found { return true, nil } @@ -616,12 +616,12 @@ func printLatencies(latencies []e2emetrics.PodLatencyData, header string) { } // logAndVerifyLatency verifies that whether pod creation latency satisfies the limit. -func logAndVerifyLatency(batchLag time.Duration, e2eLags []e2emetrics.PodLatencyData, podStartupLimits e2emetrics.LatencyMetric, +func logAndVerifyLatency(ctx context.Context, batchLag time.Duration, e2eLags []e2emetrics.PodLatencyData, podStartupLimits e2emetrics.LatencyMetric, podBatchStartupLimit time.Duration, testInfo map[string]string, isVerify bool) { printLatencies(e2eLags, "worst client e2e total latencies") // TODO(coufon): do not trust 'kubelet' metrics since they are not reset! 
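The createBatchPodSequential and podWatchedRunning hunks above also show the non-Gomega half of the conversion: wait.PollImmediate with a wait.ConditionFunc becomes wait.PollImmediateWithContext with a wait.ConditionWithContextFunc. A compact sketch of that shape (waitForWatchTime is an illustrative name; the wait calls are the ones used above):

// before: wait.PollImmediate(interval, timeout, func() (bool, error) { ... })
// after:  wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { ... })
func waitForWatchTime(ctx context.Context, watchTimes map[string]metav1.Time, podName string) error {
	cond := func(ctx context.Context) (bool, error) {
		_, found := watchTimes[podName]
		return found, nil
	}
	return wait.PollImmediateWithContext(ctx, 2*time.Second, framework.PodStartTimeout, cond)
}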
- latencyMetrics, _ := getPodStartLatency(kubeletAddr) + latencyMetrics, _ := getPodStartLatency(ctx, kubeletAddr) framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics)) podStartupLatency := extractLatencyMetrics(e2eLags) diff --git a/test/e2e_node/device_manager_test.go b/test/e2e_node/device_manager_test.go index fba8efbdf50..2892dae1175 100644 --- a/test/e2e_node/device_manager_test.go +++ b/test/e2e_node/device_manager_test.go @@ -63,9 +63,9 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur } configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile) - sd := setupSRIOVConfigOrFail(f, configMap) + sd := setupSRIOVConfigOrFail(ctx, f, configMap) - waitForSRIOVResources(f, sd) + waitForSRIOVResources(ctx, f, sd) cntName := "gu-container" // we create and delete a pod to make sure the internal device manager state contains a pod allocation @@ -85,19 +85,19 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur podName := "gu-pod-rec-pre-1" framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) // now we need to simulate a node drain, so we remove all the pods, including the sriov device plugin. ginkgo.By("deleting the pod") // note we delete right now because we know the current implementation of devicemanager will NOT // clean up on pod deletion. When this changes, the deletion needs to be done after the test is done. - deletePodSyncByName(f, pod.Name) - waitForAllContainerRemoval(pod.Name, pod.Namespace) + deletePodSyncByName(ctx, f, pod.Name) + waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace) ginkgo.By("teardown the sriov device plugin") // since we will NOT be recreating the plugin, we clean up everything now - teardownSRIOVConfigOrFail(f, sd) + teardownSRIOVConfigOrFail(ctx, f, sd) ginkgo.By("stopping the kubelet") killKubelet("SIGSTOP") @@ -115,8 +115,8 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur ginkgo.By("waiting for the kubelet to be ready again") // Wait for the Kubelet to be ready. 
- gomega.Eventually(func() bool { - nodes, err := e2enode.TotalReady(f.ClientSet) + gomega.Eventually(ctx, func(ctx context.Context) bool { + nodes, err := e2enode.TotalReady(ctx, f.ClientSet) framework.ExpectNoError(err) return nodes == 1 }, time.Minute, time.Second).Should(gomega.BeTrue()) @@ -131,15 +131,15 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod = makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = e2epod.NewPodClient(f).Create(pod) - err = e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { + pod = e2epod.NewPodClient(f).Create(ctx, pod) + err = e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase != v1.PodPending { return true, nil } return false, nil }) framework.ExpectNoError(err) - pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if pod.Status.Phase != v1.PodFailed { @@ -151,7 +151,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur framework.Failf("pod %s failed for wrong reason: %q", pod.Name, pod.Status.Reason) } - deletePodSyncByName(f, pod.Name) + deletePodSyncByName(ctx, f, pod.Name) }) ginkgo.It("should be able to recover V1 (aka pre-1.20) checkpoint data and update topology info on device re-registration", func(ctx context.Context) { @@ -164,13 +164,13 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile) - sd := setupSRIOVConfigOrFail(f, configMap) - waitForSRIOVResources(f, sd) + sd := setupSRIOVConfigOrFail(ctx, f, configMap) + waitForSRIOVResources(ctx, f, sd) cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize) framework.ExpectNoError(err) - resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{}) + resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{}) conn.Close() framework.ExpectNoError(err) @@ -183,7 +183,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur } } if suitableDevs == 0 { - teardownSRIOVConfigOrFail(f, sd) + teardownSRIOVConfigOrFail(ctx, f, sd) e2eskipper.Skipf("no devices found on NUMA Cell other than 0") } @@ -205,19 +205,19 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur podName := "gu-pod-rec-pre-1" framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) // now we need to simulate a node drain, so we remove all the pods, including the sriov device plugin. ginkgo.By("deleting the pod") // note we delete right now because we know the current implementation of devicemanager will NOT // clean up on pod deletion. When this changes, the deletion needs to be done after the test is done. 
- deletePodSyncByName(f, pod.Name) - waitForAllContainerRemoval(pod.Name, pod.Namespace) + deletePodSyncByName(ctx, f, pod.Name) + waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace) ginkgo.By("teardown the sriov device plugin") // no need to delete the config now (speed up later) - deleteSRIOVPodOrFail(f, sd) + deleteSRIOVPodOrFail(ctx, f, sd) ginkgo.By("stopping the kubelet") killKubelet("SIGSTOP") @@ -235,8 +235,8 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur ginkgo.By("waiting for the kubelet to be ready again") // Wait for the Kubelet to be ready. - gomega.Eventually(func() bool { - nodes, err := e2enode.TotalReady(f.ClientSet) + gomega.Eventually(ctx, func(ctx context.Context) bool { + nodes, err := e2enode.TotalReady(ctx, f.ClientSet) framework.ExpectNoError(err) return nodes == 1 }, time.Minute, time.Second).Should(gomega.BeTrue()) @@ -245,9 +245,9 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur configMap: sd.configMap, serviceAccount: sd.serviceAccount, } - sd2.pod = createSRIOVPodOrFail(f) + sd2.pod = createSRIOVPodOrFail(ctx, f) ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd2) - waitForSRIOVResources(f, sd2) + waitForSRIOVResources(ctx, f, sd2) compareSRIOVResources(sd, sd2) @@ -255,7 +255,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur framework.ExpectNoError(err) defer conn.Close() - resp2, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{}) + resp2, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{}) framework.ExpectNoError(err) cntDevs := stringifyContainerDevices(resp.GetDevices()) diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 84568950dad..9e6da4f48d1 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -97,18 +97,18 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { var v1alphaPodResources *kubeletpodresourcesv1alpha1.ListPodResourcesResponse var v1PodResources *kubeletpodresourcesv1.ListPodResourcesResponse var err error - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Wait for node to be ready") - gomega.Eventually(func() bool { - nodes, err := e2enode.TotalReady(f.ClientSet) + gomega.Eventually(ctx, func(ctx context.Context) bool { + nodes, err := e2enode.TotalReady(ctx, f.ClientSet) framework.ExpectNoError(err) return nodes == 1 }, time.Minute, time.Second).Should(gomega.BeTrue()) - v1alphaPodResources, err = getV1alpha1NodeDevices() + v1alphaPodResources, err = getV1alpha1NodeDevices(ctx) framework.ExpectNoError(err, "should get node local podresources by accessing the (v1alpha) podresources API endpoint") - v1PodResources, err = getV1NodeDevices() + v1PodResources, err = getV1NodeDevices(ctx) framework.ExpectNoError(err, "should get node local podresources by accessing the (v1) podresources API endpoint") // Before we run the device plugin test, we need to ensure @@ -137,30 +137,30 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { } } dptemplate = dp.DeepCopy() - devicePluginPod = e2epod.NewPodClient(f).CreateSync(dp) + devicePluginPod = e2epod.NewPodClient(f).CreateSync(ctx, dp) ginkgo.By("Waiting for devices to become available on the local node") - gomega.Eventually(func() bool { - node, ready := getLocalTestNode(f) + gomega.Eventually(ctx, func(ctx context.Context) bool { + node, 
ready := getLocalTestNode(ctx, f) return ready && numberOfSampleResources(node) > 0 }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) framework.Logf("Successfully created device plugin pod") ginkgo.By("Waiting for the resource exported by the sample device plugin to become available on the local node") - gomega.Eventually(func() bool { - node, ready := getLocalTestNode(f) + gomega.Eventually(ctx, func(ctx context.Context) bool { + node, ready := getLocalTestNode(ctx, f) return ready && numberOfDevicesCapacity(node, resourceName) == devsLen && numberOfDevicesAllocatable(node, resourceName) == devsLen }, 30*time.Second, framework.Poll).Should(gomega.BeTrue()) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Deleting the device plugin pod") - e2epod.NewPodClient(f).DeleteSync(devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute) ginkgo.By("Deleting any Pods created by the test") - l, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{}) + l, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) for _, p := range l.Items { if p.Namespace != f.Namespace.Name { @@ -168,14 +168,14 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { } framework.Logf("Deleting pod: %s", p.Name) - e2epod.NewPodClient(f).DeleteSync(p.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute) } restartKubelet(true) ginkgo.By("Waiting for devices to become unavailable on the local node") - gomega.Eventually(func() bool { - node, ready := getLocalTestNode(f) + gomega.Eventually(ctx, func(ctx context.Context) bool { + node, ready := getLocalTestNode(ctx, f) return ready && numberOfSampleResources(node) <= 0 }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) @@ -184,15 +184,15 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { ginkgo.It("Can schedule a pod that requires a device", func(ctx context.Context) { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" - pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) + pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" - devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) + devID1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE) gomega.Expect(devID1).To(gomega.Not(gomega.Equal(""))) - v1alphaPodResources, err = getV1alpha1NodeDevices() + v1alphaPodResources, err = getV1alpha1NodeDevices(ctx) framework.ExpectNoError(err) - v1PodResources, err = getV1NodeDevices() + v1PodResources, err = getV1NodeDevices(ctx) framework.ExpectNoError(err) framework.Logf("v1alphaPodResources.PodResources:%+v\n", v1alphaPodResources.PodResources) @@ -244,78 +244,78 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { ginkgo.It("Keeps device plugin assignments across pod and kubelet restarts", func(ctx context.Context) { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" - pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) + pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" - devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) + devID1 := 
parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE) gomega.Expect(devID1).To(gomega.Not(gomega.Equal(""))) - pod1, err := e2epod.NewPodClient(f).Get(context.TODO(), pod1.Name, metav1.GetOptions{}) + pod1, err := e2epod.NewPodClient(f).Get(ctx, pod1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - ensurePodContainerRestart(f, pod1.Name, pod1.Name) + ensurePodContainerRestart(ctx, f, pod1.Name, pod1.Name) ginkgo.By("Confirming that device assignment persists even after container restart") - devIDAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) + devIDAfterRestart := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE) framework.ExpectEqual(devIDAfterRestart, devID1) ginkgo.By("Restarting Kubelet") restartKubelet(true) ginkgo.By("Wait for node to be ready again") - e2enode.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute) + e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, 5*time.Minute) ginkgo.By("Validating that assignment is kept") - ensurePodContainerRestart(f, pod1.Name, pod1.Name) + ensurePodContainerRestart(ctx, f, pod1.Name, pod1.Name) ginkgo.By("Confirming that after a kubelet restart, fake-device assignment is kept") - devIDRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) + devIDRestart1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE) framework.ExpectEqual(devIDRestart1, devID1) }) ginkgo.It("Keeps device plugin assignments after the device plugin has been re-registered", func(ctx context.Context) { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" - pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) + pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" - devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) + devID1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE) gomega.Expect(devID1).To(gomega.Not(gomega.Equal(""))) - pod1, err := e2epod.NewPodClient(f).Get(context.TODO(), pod1.Name, metav1.GetOptions{}) + pod1, err := e2epod.NewPodClient(f).Get(ctx, pod1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Restarting Kubelet") restartKubelet(true) ginkgo.By("Wait for node to be ready again") - e2enode.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute) + e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, 5*time.Minute) ginkgo.By("Re-Register resources and delete the plugin pod") gp := int64(0) deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } - e2epod.NewPodClient(f).DeleteSync(devicePluginPod.Name, deleteOptions, time.Minute) - waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) + e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, time.Minute) + waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) ginkgo.By("Recreating the plugin pod") - devicePluginPod = e2epod.NewPodClient(f).CreateSync(dptemplate) + devicePluginPod = e2epod.NewPodClient(f).CreateSync(ctx, dptemplate) ginkgo.By("Confirming that after a kubelet and pod restart, fake-device assignment is kept") - ensurePodContainerRestart(f, pod1.Name, pod1.Name) - devIDRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) + ensurePodContainerRestart(ctx, f, pod1.Name, pod1.Name) + devIDRestart1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE) framework.ExpectEqual(devIDRestart1, devID1) ginkgo.By("Waiting for resource to become 
available on the local node after re-registration") - gomega.Eventually(func() bool { - node, ready := getLocalTestNode(f) + gomega.Eventually(ctx, func() bool { + node, ready := getLocalTestNode(ctx, f) return ready && numberOfDevicesCapacity(node, resourceName) == devsLen && numberOfDevicesAllocatable(node, resourceName) == devsLen }, 30*time.Second, framework.Poll).Should(gomega.BeTrue()) ginkgo.By("Creating another pod") - pod2 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) + pod2 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(resourceName, podRECMD)) ginkgo.By("Checking that pod got a different fake device") - devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE) + devID2 := parseLog(ctx, f, pod2.Name, pod2.Name, deviceIDRE) gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2))) }) @@ -347,16 +347,16 @@ func makeBusyboxPod(resourceName, cmd string) *v1.Pod { } // ensurePodContainerRestart confirms that pod container has restarted at least once -func ensurePodContainerRestart(f *framework.Framework, podName string, contName string) { +func ensurePodContainerRestart(ctx context.Context, f *framework.Framework, podName string, contName string) { var initialCount int32 var currentCount int32 - p, err := e2epod.NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{}) + p, err := e2epod.NewPodClient(f).Get(ctx, podName, metav1.GetOptions{}) if err != nil || len(p.Status.ContainerStatuses) < 1 { framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err) } initialCount = p.Status.ContainerStatuses[0].RestartCount - gomega.Eventually(func() bool { - p, err = e2epod.NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() bool { + p, err = e2epod.NewPodClient(f).Get(ctx, podName, metav1.GetOptions{}) if err != nil || len(p.Status.ContainerStatuses) < 1 { return false } @@ -367,8 +367,8 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName } // parseLog returns the matching string for the specified regular expression parsed from the container logs. -func parseLog(f *framework.Framework, podName string, contName string, re string) string { - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) +func parseLog(ctx context.Context, f *framework.Framework, podName string, contName string, re string) string { + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, contName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) } diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index 92f0e425e45..adab5309557 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -139,7 +139,6 @@ func TestMain(m *testing.M) { const rootfs = "/rootfs" func TestE2eNode(t *testing.T) { - // Make sure we are not limited by sshd when it comes to open files if err := rlimit.SetNumFiles(1000000); err != nil { klog.Infof("failed to set rlimit on max file handles: %v", err) @@ -195,7 +194,7 @@ func TestE2eNode(t *testing.T) { } // Setup the kubelet on the node -var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { +var _ = ginkgo.SynchronizedBeforeSuite(func(ctx context.Context) []byte { // Run system validation test. 
gomega.Expect(validateSystem()).To(gomega.Succeed(), "system validation") @@ -203,7 +202,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling. if framework.TestContext.PrepullImages { klog.Infof("Pre-pulling images so that they are cached for the tests.") - updateImageAllowList() + updateImageAllowList(ctx) err := PrePullAllImages() gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) } @@ -223,7 +222,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { } klog.Infof("Wait for the node to be ready") - waitForNodeReady() + waitForNodeReady(ctx) // Reference common test to make the import valid. commontest.CurrentSuite = commontest.NodeE2E @@ -232,10 +231,10 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Since the bearer token is generated randomly at run time, // we need to distribute the bearer token to other processes to make them use the same token. return []byte(framework.TestContext.BearerToken) -}, func(token []byte) { +}, func(ctx context.Context, token []byte) { framework.TestContext.BearerToken = string(token) // update test context with node configuration. - gomega.Expect(updateTestContext()).To(gomega.Succeed(), "update test context with node config.") + gomega.Expect(updateTestContext(ctx)).To(gomega.Succeed(), "update test context with node config.") }) // Tear down the kubelet on the node @@ -280,7 +279,7 @@ func maskLocksmithdOnCoreos() { } } -func waitForNodeReady() { +func waitForNodeReady(ctx context.Context) { const ( // nodeReadyTimeout is the time to wait for node to become ready. nodeReadyTimeout = 2 * time.Minute @@ -289,7 +288,7 @@ func waitForNodeReady() { ) client, err := getAPIServerClient() framework.ExpectNoError(err, "should be able to get apiserver client.") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { node, err := getNode(client) if err != nil { return fmt.Errorf("failed to get node: %v", err) @@ -302,9 +301,9 @@ func waitForNodeReady() { } // updateTestContext updates the test context with the node name. -func updateTestContext() error { +func updateTestContext(ctx context.Context) error { setExtraEnvs() - updateImageAllowList() + updateImageAllowList(ctx) client, err := getAPIServerClient() if err != nil { @@ -319,7 +318,7 @@ func updateTestContext() error { // Update test context with current kubelet configuration. // This assumes all tests which dynamically change kubelet configuration // must: 1) run in serial; 2) restore kubelet configuration after test. - kubeletCfg, err := getCurrentKubeletConfig() + kubeletCfg, err := getCurrentKubeletConfig(ctx) if err != nil { return fmt.Errorf("failed to get kubelet configuration: %v", err) } diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 13700c7b1f1..759000d2bb7 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -77,9 +77,9 @@ var _ = SIGDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeFeature:Evic pressureTimeout := 15 * time.Minute inodesConsumed := uint64(200000) ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { // Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction. 
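Ginkgo v2 passes a context into any suite or setup closure that declares one, which is what lets the suite code above hand a cancellable context to helpers such as waitForNodeReady and updateTestContext. A minimal sketch under that assumption, with doSetup standing in for the real helpers:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = ginkgo.SynchronizedBeforeSuite(func(ctx context.Context) []byte {
	// doSetup is a placeholder; the point is only that ctx is forwarded.
	gomega.Expect(doSetup(ctx)).To(gomega.Succeed())
	return nil
}, func(ctx context.Context, data []byte) {
	gomega.Expect(doSetup(ctx)).To(gomega.Succeed())
})

// doSetup stands in for helpers such as waitForNodeReady or updateTestContext.
func doSetup(ctx context.Context) error { return ctx.Err() }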
- summary := eventuallyGetSummary() + summary := eventuallyGetSummary(ctx) inodesFree := *summary.Node.Fs.InodesFree if inodesFree <= inodesConsumed { e2eskipper.Skipf("Too few inodes free on the host for the InodeEviction test to run") @@ -114,9 +114,9 @@ var _ = SIGDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature: expectedStarvedResource := resourceInodes inodesConsumed := uint64(100000) ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { // Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction. - summary := eventuallyGetSummary() + summary := eventuallyGetSummary(ctx) inodesFree := *summary.Node.Fs.InodesFree if inodesFree <= inodesConsumed { e2eskipper.Skipf("Too few inodes free on the host for the InodeEviction test to run") @@ -144,9 +144,9 @@ var _ = SIGDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][Node expectedStarvedResource := v1.ResourceMemory pressureTimeout := 10 * time.Minute ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { // Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds. - kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory] + kubeReserved := getNodeCPUAndMemoryCapacity(ctx, f)[v1.ResourceMemory] // The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb // We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb kubeReserved.Sub(resource.MustParse("300Mi")) @@ -179,8 +179,8 @@ var _ = SIGDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeatu expectedStarvedResource := v1.ResourceEphemeralStorage ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { - summary := eventuallyGetSummary() + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { + summary := eventuallyGetSummary(ctx) diskConsumedByTest := resource.MustParse("4Gi") availableBytesOnSystem := *(summary.Node.Fs.AvailableBytes) @@ -217,9 +217,9 @@ var _ = SIGDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeF expectedNodeCondition := v1.NodeDiskPressure expectedStarvedResource := v1.ResourceEphemeralStorage ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { diskConsumed := resource.MustParse("4Gi") - summary := eventuallyGetSummary() + summary := eventuallyGetSummary(ctx) availableBytes := *(summary.Node.Fs.AvailableBytes) if availableBytes <= uint64(diskConsumed.Value()) { e2eskipper.Skipf("Too little disk free on the host for the LocalStorageSoftEviction test to run") @@ -254,7 +254,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationMemoryBackedVolumeEviction [Sl f.NamespacePodSecurityEnforceLevel = 
admissionapi.LevelPrivileged evictionTestTimeout := 7 * time.Minute ginkgo.Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty) initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"} initialConfig.FeatureGates["SizeMemoryBackedVolumes"] = false @@ -294,7 +294,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disr f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged evictionTestTimeout := 10 * time.Minute ginkgo.Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty) initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"} }) @@ -353,9 +353,9 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive] highPriority := int32(999999999) ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { memoryConsumed := resource.MustParse("600Mi") - summary := eventuallyGetSummary() + summary := eventuallyGetSummary(ctx) availableBytes := *(summary.Node.Memory.AvailableBytes) if availableBytes <= uint64(memoryConsumed.Value()) { e2eskipper.Skipf("Too little memory free on the host for the PriorityMemoryEvictionOrdering test to run") @@ -363,12 +363,12 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive] initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))} initialConfig.EvictionMinimumReclaim = map[string]string{} }) - ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) + ginkgo.BeforeEach(func(ctx context.Context) { + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) - ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{}) + ginkgo.AfterEach(func(ctx context.Context) { + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) specs := []podEvictSpec{ @@ -411,9 +411,9 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru highPriority := int32(999999999) ginkgo.Context(fmt.Sprintf(testContextFmt, 
expectedNodeCondition), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { diskConsumed := resource.MustParse("4Gi") - summary := eventuallyGetSummary() + summary := eventuallyGetSummary(ctx) availableBytes := *(summary.Node.Fs.AvailableBytes) if availableBytes <= uint64(diskConsumed.Value()) { e2eskipper.Skipf("Too little disk free on the host for the PriorityLocalStorageEvictionOrdering test to run") @@ -421,12 +421,12 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))} initialConfig.EvictionMinimumReclaim = map[string]string{} }) - ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) + ginkgo.BeforeEach(func(ctx context.Context) { + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) - ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{}) + ginkgo.AfterEach(func(ctx context.Context) { + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) specs := []podEvictSpec{ @@ -468,19 +468,19 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No highPriority := int32(999999999) ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { pidsConsumed := int64(10000) - summary := eventuallyGetSummary() + summary := eventuallyGetSummary(ctx) availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses) initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)} initialConfig.EvictionMinimumReclaim = map[string]string{} }) - ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) + ginkgo.BeforeEach(func(ctx context.Context) { + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) - ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{}) + ginkgo.AfterEach(func(ctx context.Context) { + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) 
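Setup and teardown nodes follow the same convention as the PriorityClass fixtures above: the closure accepts ctx and forwards it to the create and delete calls. A compact sketch, assuming a clientset initialized elsewhere by the suite:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	schedulingv1 "k8s.io/api/scheduling/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

var cs kubernetes.Interface // assumed to be set up by the suite

var _ = ginkgo.Describe("priority class fixture (sketch)", func() {
	ginkgo.BeforeEach(func(ctx context.Context) {
		pc := &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "sketch-high"}, Value: 999999999}
		_, err := cs.SchedulingV1().PriorityClasses().Create(ctx, pc, metav1.CreateOptions{})
		if err != nil && !apierrors.IsAlreadyExists(err) {
			framework.Failf("creating priority class: %v", err)
		}
	})
	ginkgo.AfterEach(func(ctx context.Context) {
		framework.ExpectNoError(cs.SchedulingV1().PriorityClasses().Delete(ctx, "sketch-high", metav1.DeleteOptions{}))
	})
})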
specs := []podEvictSpec{ @@ -503,9 +503,9 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No }) ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; PodDisruptionConditions enabled [NodeFeature:PodDisruptionConditions]", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { pidsConsumed := int64(10000) - summary := eventuallyGetSummary() + summary := eventuallyGetSummary(ctx) availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses) initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)} initialConfig.EvictionMinimumReclaim = map[string]string{} @@ -543,10 +543,10 @@ type podEvictSpec struct { // It ensures that all pods with non-zero evictionPriority are eventually evicted. // // runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists -func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(), testSpecs []podEvictSpec) { +func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(ctx context.Context), testSpecs []podEvictSpec) { // Place the remainder of the test within a context so that the kubelet config is set before and after the test. ginkgo.Context("", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // reduce memory usage in the allocatable cgroup to ensure we do not have MemoryPressure reduceAllocatableMemoryUsageIfCgroupv1() // Nodes do not immediately report local storage capacity @@ -557,35 +557,35 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe for _, spec := range testSpecs { pods = append(pods, spec.pod) } - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) }) ginkgo.It("should eventually evict all of the correct pods", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Waiting for node to have NodeCondition: %s", expectedNodeCondition)) - gomega.Eventually(func() error { - logFunc() - if expectedNodeCondition == noPressure || hasNodeCondition(f, expectedNodeCondition) { + gomega.Eventually(ctx, func(ctx context.Context) error { + logFunc(ctx) + if expectedNodeCondition == noPressure || hasNodeCondition(ctx, f, expectedNodeCondition) { return nil } return fmt.Errorf("NodeCondition: %s not encountered", expectedNodeCondition) }, pressureTimeout, evictionPollInterval).Should(gomega.BeNil()) ginkgo.By("Waiting for evictions to occur") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func(ctx context.Context) error { if expectedNodeCondition != noPressure { - if hasNodeCondition(f, expectedNodeCondition) { + if hasNodeCondition(ctx, f, expectedNodeCondition) { framework.Logf("Node has %s", expectedNodeCondition) } else { framework.Logf("Node does NOT have %s", expectedNodeCondition) } } - logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey) - logFunc() - return verifyEvictionOrdering(f, testSpecs) - }, pressureTimeout, evictionPollInterval).Should(gomega.BeNil()) + logKubeletLatencyMetrics(ctx, 
kubeletmetrics.EvictionStatsAgeKey) + logFunc(ctx) + return verifyEvictionOrdering(ctx, f, testSpecs) + }, pressureTimeout, evictionPollInterval).Should(gomega.Succeed()) ginkgo.By("checking for the expected pod conditions for evicted pods") - verifyPodConditions(f, testSpecs) + verifyPodConditions(ctx, f, testSpecs) // We observe pressure from the API server. The eviction manager observes pressure from the kubelet internal stats. // This means the eviction manager will observe pressure before we will, creating a delay between when the eviction manager @@ -594,30 +594,30 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe time.Sleep(pressureDelay) ginkgo.By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition)) - gomega.Eventually(func() error { - logFunc() - logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey) - if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) { + gomega.Eventually(ctx, func(ctx context.Context) error { + logFunc(ctx) + logKubeletLatencyMetrics(ctx, kubeletmetrics.EvictionStatsAgeKey) + if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) { return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition) } return nil }, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil()) ginkgo.By("checking for stable, pressure-free condition without unexpected pod failures") - gomega.Consistently(func() error { - if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) { + gomega.Consistently(ctx, func(ctx context.Context) error { + if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) { return fmt.Errorf("%s disappeared and then reappeared", expectedNodeCondition) } - logFunc() - logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey) - return verifyEvictionOrdering(f, testSpecs) - }, postTestConditionMonitoringPeriod, evictionPollInterval).Should(gomega.BeNil()) + logFunc(ctx) + logKubeletLatencyMetrics(ctx, kubeletmetrics.EvictionStatsAgeKey) + return verifyEvictionOrdering(ctx, f, testSpecs) + }, postTestConditionMonitoringPeriod, evictionPollInterval).Should(gomega.Succeed()) ginkgo.By("checking for correctly formatted eviction events") - verifyEvictionEvents(f, testSpecs, expectedStarvedResource) + verifyEvictionEvents(ctx, f, testSpecs, expectedStarvedResource) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { prePullImagesIfNeccecary := func() { if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages { // The disk eviction test may cause the prepulled images to be evicted, @@ -631,14 +631,14 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe ginkgo.By("deleting pods") for _, spec := range testSpecs { ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name)) - e2epod.NewPodClient(f).DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute) } // In case a test fails before verifying that NodeCondition no longer exist on the node, // we should wait for the NodeCondition to disappear ginkgo.By(fmt.Sprintf("making sure NodeCondition %s no longer exists on the node", expectedNodeCondition)) - gomega.Eventually(func() error { - if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) { + 
gomega.Eventually(ctx, func(ctx context.Context) error { + if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) { return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition) } return nil @@ -650,8 +650,8 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe // Ensure that the NodeCondition hasn't returned after pulling images ginkgo.By(fmt.Sprintf("making sure NodeCondition %s doesn't exist again after pulling images", expectedNodeCondition)) - gomega.Eventually(func() error { - if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) { + gomega.Eventually(ctx, func(ctx context.Context) error { + if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) { return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition) } return nil @@ -659,7 +659,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe ginkgo.By("making sure we can start a new pod after the test") podName := "test-admit-pod" - e2epod.NewPodClient(f).CreateSync(&v1.Pod{ + e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, @@ -676,8 +676,8 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe if ginkgo.CurrentSpecReport().Failed() { if framework.TestContext.DumpLogsOnFailure { - logPodEvents(f) - logNodeEvents(f) + logPodEvents(ctx, f) + logNodeEvents(ctx, f) } } }) @@ -686,9 +686,9 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe // verifyEvictionOrdering returns an error if all non-zero priority pods have not been evicted, nil otherwise // This function panics (via Expect) if eviction ordering is violated, or if a priority-zero pod fails. 
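The polling changes above rely on two Gomega behaviors: passing ctx as the first argument to Eventually or Consistently bounds the whole poll by the spec context, and a polled function that itself accepts a context receives one per attempt. A sketch of that combination with a hypothetical waitForNoPressure helper:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForNoPressure is hypothetical: it polls until the node no longer reports
// the given condition and stops early once ctx is cancelled.
func waitForNoPressure(ctx context.Context, c kubernetes.Interface, node string, cond v1.NodeConditionType) {
	gomega.Eventually(ctx, func(ctx context.Context) error {
		n, err := c.CoreV1().Nodes().Get(ctx, node, metav1.GetOptions{})
		if err != nil {
			return err
		}
		for _, nc := range n.Status.Conditions {
			if nc.Type == cond && nc.Status == v1.ConditionTrue {
				return fmt.Errorf("node still has %s", cond)
			}
		}
		return nil
	}, 5*time.Minute, 10*time.Second).Should(gomega.Succeed())
}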
-func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) error { +func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec) error { // Gather current information - updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return err } @@ -752,10 +752,10 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er return fmt.Errorf("pods that should be evicted are still running: %#v", pendingPods) } -func verifyPodConditions(f *framework.Framework, testSpecs []podEvictSpec) { +func verifyPodConditions(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec) { for _, spec := range testSpecs { if spec.wantPodDisruptionCondition != nil { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), spec.pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, spec.pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name) cType := *spec.wantPodDisruptionCondition @@ -767,7 +767,7 @@ func verifyPodConditions(f *framework.Framework, testSpecs []podEvictSpec) { } } -func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) { +func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) { for _, spec := range testSpecs { pod := spec.pod if spec.evictionPriority != 0 { @@ -777,7 +777,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe "involvedObject.namespace": f.Namespace.Name, "reason": eviction.Reason, }.AsSelector().String() - podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{FieldSelector: selector}) + podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{FieldSelector: selector}) gomega.Expect(err).To(gomega.BeNil(), "Unexpected error getting events during eviction test: %v", err) framework.ExpectEqual(len(podEvictEvents.Items), 1, "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items)) event := podEvictEvents.Items[0] @@ -822,15 +822,15 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe } // Returns TRUE if the node has the node condition, FALSE otherwise -func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool { - localNodeStatus := getLocalNode(f).Status +func hasNodeCondition(ctx context.Context, f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool { + localNodeStatus := getLocalNode(ctx, f).Status _, actualNodeCondition := testutils.GetNodeCondition(&localNodeStatus, expectedNodeCondition) gomega.Expect(actualNodeCondition).NotTo(gomega.BeNil()) return actualNodeCondition.Status == v1.ConditionTrue } -func logInodeMetrics() { - summary, err := getNodeSummary() +func logInodeMetrics(ctx context.Context) { + summary, err := getNodeSummary(ctx) if err != nil { framework.Logf("Error getting summary: %v", err) return @@ -856,8 +856,8 @@ func logInodeMetrics() { } } -func logDiskMetrics() { - summary, err := getNodeSummary() +func logDiskMetrics(ctx context.Context) { + summary, err := getNodeSummary(ctx) if err != 
nil { framework.Logf("Error getting summary: %v", err) return @@ -883,8 +883,8 @@ func logDiskMetrics() { } } -func logMemoryMetrics() { - summary, err := getNodeSummary() +func logMemoryMetrics(ctx context.Context) { + summary, err := getNodeSummary(ctx) if err != nil { framework.Logf("Error getting summary: %v", err) return @@ -907,8 +907,8 @@ func logMemoryMetrics() { } } -func logPidMetrics() { - summary, err := getNodeSummary() +func logPidMetrics(ctx context.Context) { + summary, err := getNodeSummary(ctx) if err != nil { framework.Logf("Error getting summary: %v", err) return @@ -918,9 +918,9 @@ func logPidMetrics() { } } -func eventuallyGetSummary() (s *kubeletstatsv1alpha1.Summary) { - gomega.Eventually(func() error { - summary, err := getNodeSummary() +func eventuallyGetSummary(ctx context.Context) (s *kubeletstatsv1alpha1.Summary) { + gomega.Eventually(ctx, func() error { + summary, err := getNodeSummary(ctx) if err != nil { return err } diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index 43d3aa97523..5f531a9cb0c 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -172,13 +172,13 @@ func containerGCTest(f *framework.Framework, test testRun) { } ginkgo.Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { realPods := getPods(test.testPods) - e2epod.NewPodClient(f).CreateBatch(realPods) + e2epod.NewPodClient(f).CreateBatch(ctx, realPods) ginkgo.By("Making sure all containers restart the specified number of times") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func(ctx context.Context) error { for _, podSpec := range test.testPods { - err := verifyPodRestartCount(f, podSpec.podName, podSpec.numContainers, podSpec.restartCount) + err := verifyPodRestartCount(ctx, f, podSpec.podName, podSpec.numContainers, podSpec.restartCount) if err != nil { return err } @@ -187,12 +187,12 @@ func containerGCTest(f *framework.Framework, test testRun) { }, setupDuration, runtimePollInterval).Should(gomega.BeNil()) }) - ginkgo.It(fmt.Sprintf("Should eventually garbage collect containers when we exceed the number of dead containers per container"), func(ctx context.Context) { + ginkgo.It("Should eventually garbage collect containers when we exceed the number of dead containers per container", func(ctx context.Context) { totalContainers := 0 for _, pod := range test.testPods { totalContainers += pod.numContainers*2 + 1 } - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { total := 0 for _, pod := range test.testPods { containerNames, err := pod.getContainerNames() @@ -223,7 +223,7 @@ func containerGCTest(f *framework.Framework, test testRun) { if maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers ginkgo.By("Making sure the kubelet consistently keeps around an extra copy of each container.") - gomega.Consistently(func() error { + gomega.Consistently(ctx, func() error { for _, pod := range test.testPods { containerNames, err := pod.getContainerNames() if err != nil { @@ -246,14 +246,14 @@ func containerGCTest(f *framework.Framework, test testRun) { } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { for _, pod := range test.testPods { ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName)) - e2epod.NewPodClient(f).DeleteSync(pod.podName, metav1.DeleteOptions{}, 
e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) } ginkgo.By("Making sure all containers get cleaned up") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { for _, pod := range test.testPods { containerNames, err := pod.getContainerNames() if err != nil { @@ -267,8 +267,8 @@ func containerGCTest(f *framework.Framework, test testRun) { }, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil()) if ginkgo.CurrentSpecReport().Failed() && framework.TestContext.DumpLogsOnFailure { - logNodeEvents(f) - logPodEvents(f) + logNodeEvents(ctx, f) + logPodEvents(ctx, f) } }) }) @@ -317,8 +317,8 @@ func getRestartingContainerCommand(path string, containerNum int, restarts int32 } } -func verifyPodRestartCount(f *framework.Framework, podName string, expectedNumContainers int, expectedRestartCount int32) error { - updatedPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podName, metav1.GetOptions{}) +func verifyPodRestartCount(ctx context.Context, f *framework.Framework, podName string, expectedNumContainers int, expectedRestartCount int32) error { + updatedPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, podName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go index be51d9090a3..c43b70485b4 100644 --- a/test/e2e_node/hugepages_test.go +++ b/test/e2e_node/hugepages_test.go @@ -207,10 +207,10 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H ginkgo.It("should remove resources for huge page sizes no longer supported", func(ctx context.Context) { ginkgo.By("mimicking support for 9Mi of 3Mi huge page memory by patching the node status") patch := []byte(`[{"op": "add", "path": "/status/capacity/hugepages-3Mi", "value": "9Mi"}, {"op": "add", "path": "/status/allocatable/hugepages-3Mi", "value": "9Mi"}]`) - result := f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(context.TODO()) + result := f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(ctx) framework.ExpectNoError(result.Error(), "while patching") - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err, "while getting node status") ginkgo.By("Verifying that the node now supports huge pages with size 3Mi") @@ -222,8 +222,8 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H restartKubelet(true) ginkgo.By("verifying that the hugepages-3Mi resource no longer is present") - gomega.Eventually(func() bool { - node, err = f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() bool { + node, err = f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err, "while getting node status") _, isPresent := node.Status.Capacity["hugepages-3Mi"] return isPresent @@ -235,15 +235,15 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H startKubelet := stopKubelet() ginkgo.By(`Patching away support 
for hugepage resource "hugepages-2Mi"`) patch := []byte(`[{"op": "remove", "path": "/status/capacity/hugepages-2Mi"}, {"op": "remove", "path": "/status/allocatable/hugepages-2Mi"}]`) - result := f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(context.TODO()) + result := f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(ctx) framework.ExpectNoError(result.Error(), "while patching") ginkgo.By("Starting kubelet again") startKubelet() ginkgo.By("verifying that the hugepages-2Mi resource is present") - gomega.Eventually(func() bool { - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() bool { + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err, "while getting node status") _, isPresent := node.Status.Capacity["hugepages-2Mi"] return isPresent @@ -259,7 +259,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H hugepages map[string]int ) - setHugepages := func() { + setHugepages := func(ctx context.Context) { for hugepagesResource, count := range hugepages { size := resourceToSize[hugepagesResource] ginkgo.By(fmt.Sprintf("Verifying hugepages %d are supported", size)) @@ -269,7 +269,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H } ginkgo.By(fmt.Sprintf("Configuring the host to reserve %d of pre-allocated hugepages of size %d", count, size)) - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { if err := configureHugePages(size, count, nil); err != nil { return err } @@ -278,10 +278,10 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H } } - waitForHugepages := func() { + waitForHugepages := func(ctx context.Context) { ginkgo.By("Waiting for hugepages resource to become available on the local node") - gomega.Eventually(func() error { - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) + gomega.Eventually(ctx, func(ctx context.Context) error { + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return err } @@ -306,9 +306,9 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H }, time.Minute, framework.Poll).Should(gomega.BeNil()) } - releaseHugepages := func() { + releaseHugepages := func(ctx context.Context) { ginkgo.By("Releasing hugepages") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { for hugepagesResource := range hugepages { command := fmt.Sprintf("echo 0 > %s-%dkB/%s", hugepagesDirPrefix, resourceToSize[hugepagesResource], hugepagesCapacityFile) if err := exec.Command("/bin/sh", "-c", command).Run(); err != nil { @@ -337,39 +337,39 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H resourceToCgroup[resourceName], ) ginkgo.By("checking if the expected hugetlb settings were applied") - e2epod.NewPodClient(f).Create(verifyPod) - err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, verifyPod) + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, verifyPod.Name, f.Namespace.Name) 
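The hugepages helpers above are closures declared inside the Describe; giving them an explicit ctx parameter lets JustBeforeEach and JustAfterEach forward the spec context into the Eventually polls they run. A sketch of that shape, with configureHost and applyHostSetting as stand-ins rather than functions from this patch:

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("host fixture (sketch)", func() {
	// configureHost stands in for closures like setHugepages or releaseHugepages.
	configureHost := func(ctx context.Context, value int) {
		gomega.Eventually(ctx, func() error {
			return applyHostSetting(value)
		}, 30*time.Second, time.Second).Should(gomega.Succeed())
	}

	ginkgo.JustBeforeEach(func(ctx context.Context) {
		configureHost(ctx, 8)
	})
	ginkgo.JustAfterEach(func(ctx context.Context) {
		configureHost(ctx, 0)
	})
})

// applyHostSetting is a placeholder for the real sysfs manipulation.
func applyHostSetting(int) error { return nil }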
framework.ExpectNoError(err) } }) } // setup - ginkgo.JustBeforeEach(func() { - setHugepages() + ginkgo.JustBeforeEach(func(ctx context.Context) { + setHugepages(ctx) ginkgo.By("restarting kubelet to pick up pre-allocated hugepages") restartKubelet(true) - waitForHugepages() + waitForHugepages(ctx) pod := getHugepagesTestPod(f, limits, mounts, volumes) ginkgo.By("by running a test pod that requests hugepages") - testpod = e2epod.NewPodClient(f).CreateSync(pod) + testpod = e2epod.NewPodClient(f).CreateSync(ctx, pod) }) // we should use JustAfterEach because framework will teardown the client under the AfterEach method - ginkgo.JustAfterEach(func() { + ginkgo.JustAfterEach(func(ctx context.Context) { ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name)) - e2epod.NewPodClient(f).DeleteSync(testpod.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, testpod.Name, metav1.DeleteOptions{}, 2*time.Minute) - releaseHugepages() + releaseHugepages(ctx) ginkgo.By("restarting kubelet to pick up pre-allocated hugepages") restartKubelet(true) - waitForHugepages() + waitForHugepages(ctx) }) ginkgo.Context("with the resources requests that contain only one hugepages resource ", func() { diff --git a/test/e2e_node/image_credential_provider.go b/test/e2e_node/image_credential_provider.go index 32904375131..abf6336c511 100644 --- a/test/e2e_node/image_credential_provider.go +++ b/test/e2e_node/image_credential_provider.go @@ -63,6 +63,6 @@ var _ = SIGDescribe("ImageCredentialProvider [Feature:KubeletCredentialProviders } // CreateSync tests that the Pod is running and ready - podClient.CreateSync(pod) + podClient.CreateSync(ctx, pod) }) }) diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index de3f3815bf0..b1826415c6e 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -52,11 +52,11 @@ var _ = SIGDescribe("ImageID [NodeFeature: ImageID]", func() { }, } - pod := e2epod.NewPodClient(f).Create(podDesc) + pod := e2epod.NewPodClient(f).Create(ctx, podDesc) - framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace( + framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) - runningPod, err := e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + runningPod, err := e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) status := runningPod.Status diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index 50a82625adf..84c90fe2628 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -72,7 +72,7 @@ var NodePrePullImageList = sets.NewString( // 1. the hard coded lists // 2. the ones passed in from framework.TestContext.ExtraEnvs // So this function needs to be called after the extra envs are applied. -func updateImageAllowList() { +func updateImageAllowList(ctx context.Context) { // Union NodePrePullImageList and PrePulledImages into the framework image pre-pull list. 
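Plain, non-Ginkgo helpers follow the same rule: they grow a ctx parameter and pass it down to whatever performs the I/O, here the manifest download behind getGPUDevicePluginImage. A sketch with a hypothetical fetchManifest helper, not the actual e2emanifest implementation:

package example

import (
	"context"
	"fmt"
	"io"
	"net/http"
)

// fetchManifest is hypothetical; it shows the context reaching the HTTP request
// so an aborted spec cancels the download instead of leaking it.
func fetchManifest(ctx context.Context, url string) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("fetching %s: %s", url, resp.Status)
	}
	return io.ReadAll(resp.Body)
}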
e2epod.ImagePrePullList = NodePrePullImageList.Union(commontest.PrePulledImages) // Images from extra envs @@ -82,7 +82,7 @@ func updateImageAllowList() { } else { e2epod.ImagePrePullList.Insert(sriovDevicePluginImage) } - if gpuDevicePluginImage, err := getGPUDevicePluginImage(); err != nil { + if gpuDevicePluginImage, err := getGPUDevicePluginImage(ctx); err != nil { klog.Errorln(err) } else { e2epod.ImagePrePullList.Insert(gpuDevicePluginImage) @@ -213,8 +213,8 @@ func PrePullAllImages() error { } // getGPUDevicePluginImage returns the image of GPU device plugin. -func getGPUDevicePluginImage() (string, error) { - ds, err := e2emanifest.DaemonSetFromURL(e2egpu.GPUDevicePluginDSYAML) +func getGPUDevicePluginImage(ctx context.Context) (string, error) { + ds, err := e2emanifest.DaemonSetFromURL(ctx, e2egpu.GPUDevicePluginDSYAML) if err != nil { return "", fmt.Errorf("failed to parse the device plugin image: %w", err) } diff --git a/test/e2e_node/lock_contention_linux_test.go b/test/e2e_node/lock_contention_linux_test.go index 7c68fd578c3..50434f86132 100644 --- a/test/e2e_node/lock_contention_linux_test.go +++ b/test/e2e_node/lock_contention_linux_test.go @@ -69,7 +69,7 @@ var _ = SIGDescribe("Lock contention [Slow] [Disruptive] [NodeSpecialFeature:Loc ginkgo.By("verifying the kubelet is not healthy as there was a lock contention.") // Once the lock is acquired, check if the kubelet is in healthy state or not. // It should not be as the lock contention forces the kubelet to stop. - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, 10*time.Second, time.Second).Should(gomega.BeFalse()) }) diff --git a/test/e2e_node/log_path_test.go b/test/e2e_node/log_path_test.go index 2f2aebcf1f0..e5e66717a15 100644 --- a/test/e2e_node/log_path_test.go +++ b/test/e2e_node/log_path_test.go @@ -110,16 +110,16 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { } } - createAndWaitPod := func(pod *v1.Pod) error { - podClient.Create(pod) - return e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + createAndWaitPod := func(ctx context.Context, pod *v1.Pod) error { + podClient.Create(ctx, pod) + return e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) } var logPodName string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { podClient = e2epod.NewPodClient(f) logPodName = "log-pod-" + string(uuid.NewUUID()) - err := createAndWaitPod(makeLogPod(logPodName, logString)) + err := createAndWaitPod(ctx, makeLogPod(logPodName, logString)) framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName) }) ginkgo.It("should print log to correct log path", func(ctx context.Context) { @@ -127,7 +127,7 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { logDir := kubelet.ContainerLogsDir // get containerID from created Pod - createdLogPod, err := podClient.Get(context.TODO(), logPodName, metav1.GetOptions{}) + createdLogPod, err := podClient.Get(ctx, logPodName, metav1.GetOptions{}) logContainerID := kubecontainer.ParseContainerID(createdLogPod.Status.ContainerStatuses[0].ContainerID) framework.ExpectNoError(err, "Failed to get pod: %s", logPodName) @@ -135,7 +135,7 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { expectedlogFile := logDir + "/" + logPodName + "_" + f.Namespace.Name + "_" + logContainerName + "-" + logContainerID.ID + ".log" logCheckPodName := "log-check-" + 
string(uuid.NewUUID()) - err = createAndWaitPod(makeLogCheckPod(logCheckPodName, logString, expectedlogFile)) + err = createAndWaitPod(ctx, makeLogCheckPod(logCheckPodName, logString, expectedlogFile)) framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logCheckPodName) }) @@ -144,7 +144,7 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { logCRIDir := "/var/log/pods" // get podID from created Pod - createdLogPod, err := podClient.Get(context.TODO(), logPodName, metav1.GetOptions{}) + createdLogPod, err := podClient.Get(ctx, logPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get pod: %s", logPodName) podNs := createdLogPod.Namespace podName := createdLogPod.Name @@ -154,7 +154,7 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { expectedCRILogFile := logCRIDir + "/" + podNs + "_" + podName + "_" + podID + "/" + logContainerName + "/0.log" logCRICheckPodName := "log-cri-check-" + string(uuid.NewUUID()) - err = createAndWaitPod(makeLogCheckPod(logCRICheckPodName, logString, expectedCRILogFile)) + err = createAndWaitPod(ctx, makeLogCheckPod(logCRICheckPodName, logString, expectedCRILogFile)) framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logCRICheckPodName) }) }) diff --git a/test/e2e_node/memory_manager_test.go b/test/e2e_node/memory_manager_test.go index 65c1928b8d0..284d03ccf5a 100644 --- a/test/e2e_node/memory_manager_test.go +++ b/test/e2e_node/memory_manager_test.go @@ -271,10 +271,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager evictionHard: map[string]string{evictionHardMemory: "100Mi"}, } - verifyMemoryPinning := func(pod *v1.Pod, numaNodeIDs []int) { + verifyMemoryPinning := func(ctx context.Context, pod *v1.Pod, numaNodeIDs []int) { ginkgo.By("Verifying the NUMA pinning") - output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + output, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) framework.ExpectNoError(err) currentNUMANodeIDs, err := cpuset.Parse(strings.Trim(output, "\n")) @@ -283,9 +283,9 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager framework.ExpectEqual(numaNodeIDs, currentNUMANodeIDs.ToSlice()) } - waitingForHugepages := func(hugepagesCount int) { - gomega.Eventually(func() error { - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) + waitingForHugepages := func(ctx context.Context, hugepagesCount int) { + gomega.Eventually(ctx, func(ctx context.Context) error { + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return err } @@ -309,7 +309,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }, time.Minute, framework.Poll).Should(gomega.BeNil()) } - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { if isMultiNUMASupported == nil { isMultiNUMASupported = pointer.BoolPtr(isMultiNUMA()) } @@ -325,18 +325,18 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager // allocate hugepages if *is2MiHugepagesSupported { ginkgo.By("Configuring hugepages") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { return configureHugePages(hugepagesSize2M, hugepages2MiCount, pointer.IntPtr(0)) }, 30*time.Second, framework.Poll).Should(gomega.BeNil()) } 
}) // dynamically update the kubelet configuration - ginkgo.JustBeforeEach(func() { + ginkgo.JustBeforeEach(func(ctx context.Context) { // allocate hugepages if *is2MiHugepagesSupported { ginkgo.By("Waiting for hugepages resource to become available on the local node") - waitingForHugepages(hugepages2MiCount) + waitingForHugepages(ctx, hugepages2MiCount) for i := 0; i < len(ctnParams); i++ { ctnParams[i].hugepages2Mi = "8Mi" @@ -348,16 +348,16 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } }) - ginkgo.JustAfterEach(func() { + ginkgo.JustAfterEach(func(ctx context.Context) { // delete the test pod if testPod != nil && testPod.Name != "" { - e2epod.NewPodClient(f).DeleteSync(testPod.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, 2*time.Minute) } // release hugepages if *is2MiHugepagesSupported { ginkgo.By("Releasing allocated hugepages") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { // configure hugepages on the NUMA node 0 to avoid hugepages split across NUMA nodes return configureHugePages(hugepagesSize2M, 0, pointer.IntPtr(0)) }, 90*time.Second, 15*time.Second).ShouldNot(gomega.HaveOccurred(), "failed to release hugepages") @@ -365,7 +365,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }) ginkgo.Context("with static policy", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { kubeParams := *defaultKubeParams kubeParams.memoryManagerPolicy = staticPolicy updateKubeletConfigWithMemoryManagerParams(initialConfig, &kubeParams) @@ -386,7 +386,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager framework.ExpectNoError(err) defer conn.Close() - resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{}) + resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{}) framework.ExpectNoError(err) gomega.Expect(resp.Memory).ToNot(gomega.BeEmpty()) @@ -441,14 +441,14 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.It("should succeed to start the pod", func(ctx context.Context) { ginkgo.By("Running the test pod") - testPod = e2epod.NewPodClient(f).CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod) // it no taste to verify NUMA pinning when the node has only one NUMA node if !*isMultiNUMASupported { return } - verifyMemoryPinning(testPod, []int{0}) + verifyMemoryPinning(ctx, testPod, []int{0}) }) }) @@ -466,14 +466,14 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.It("should succeed to start the pod", func(ctx context.Context) { ginkgo.By("Running the test pod") - testPod = e2epod.NewPodClient(f).CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod) // it no taste to verify NUMA pinning when the node has only one NUMA node if !*isMultiNUMASupported { return } - verifyMemoryPinning(testPod, []int{0}) + verifyMemoryPinning(ctx, testPod, []int{0}) }) }) @@ -497,27 +497,27 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.It("should succeed to start all pods", func(ctx context.Context) { ginkgo.By("Running the test pod and the test pod 2") - testPod = 
e2epod.NewPodClient(f).CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod) ginkgo.By("Running the test pod 2") - testPod2 = e2epod.NewPodClient(f).CreateSync(testPod2) + testPod2 = e2epod.NewPodClient(f).CreateSync(ctx, testPod2) // it no taste to verify NUMA pinning when the node has only one NUMA node if !*isMultiNUMASupported { return } - verifyMemoryPinning(testPod, []int{0}) - verifyMemoryPinning(testPod2, []int{0}) + verifyMemoryPinning(ctx, testPod, []int{0}) + verifyMemoryPinning(ctx, testPod2, []int{0}) }) // TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945 ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func(ctx context.Context) { ginkgo.By("Running the test pod and the test pod 2") - testPod = e2epod.NewPodClient(f).CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod) ginkgo.By("Running the test pod 2") - testPod2 = e2epod.NewPodClient(f).CreateSync(testPod2) + testPod2 = e2epod.NewPodClient(f).CreateSync(ctx, testPod2) endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -526,7 +526,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager framework.ExpectNoError(err) defer conn.Close() - resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{}) + resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{}) framework.ExpectNoError(err) for _, pod := range []*v1.Pod{testPod, testPod2} { @@ -553,10 +553,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } }) - ginkgo.JustAfterEach(func() { + ginkgo.JustAfterEach(func(ctx context.Context) { // delete the test pod 2 if testPod2.Name != "" { - e2epod.NewPodClient(f).DeleteSync(testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute) } }) }) @@ -582,7 +582,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } }) - ginkgo.JustBeforeEach(func() { + ginkgo.JustBeforeEach(func(ctx context.Context) { stateData, err := getMemoryManagerState() framework.ExpectNoError(err) @@ -599,18 +599,18 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } workloadPod := makeMemoryManagerPod(workloadCtnAttrs[0].ctnName, initCtnParams, workloadCtnAttrs) - workloadPod = e2epod.NewPodClient(f).CreateSync(workloadPod) + workloadPod = e2epod.NewPodClient(f).CreateSync(ctx, workloadPod) workloadPods = append(workloadPods, workloadPod) } }) ginkgo.It("should be rejected", func(ctx context.Context) { ginkgo.By("Creating the pod") - testPod = e2epod.NewPodClient(f).Create(testPod) + testPod = e2epod.NewPodClient(f).Create(ctx, testPod) ginkgo.By("Checking that pod failed to start because of admission error") - gomega.Eventually(func() bool { - tmpPod, err := e2epod.NewPodClient(f).Get(context.TODO(), testPod.Name, metav1.GetOptions{}) + gomega.Eventually(ctx, func() bool { + tmpPod, err := e2epod.NewPodClient(f).Get(ctx, testPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if tmpPod.Status.Phase != v1.PodFailed { @@ -632,10 +632,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ) }) - ginkgo.JustAfterEach(func() { + ginkgo.JustAfterEach(func(ctx context.Context) { for _, workloadPod := range workloadPods { if 
workloadPod.Name != "" { - e2epod.NewPodClient(f).DeleteSync(workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute) } } }) @@ -643,7 +643,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }) ginkgo.Context("with none policy", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { kubeParams := *defaultKubeParams kubeParams.memoryManagerPolicy = nonePolicy updateKubeletConfigWithMemoryManagerParams(initialConfig, &kubeParams) @@ -671,7 +671,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager framework.ExpectNoError(err) defer conn.Close() - resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{}) + resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{}) framework.ExpectNoError(err) gomega.Expect(resp.Memory).To(gomega.BeEmpty()) @@ -679,7 +679,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager // TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945 ginkgo.It("should not report any memory data during request to pod resources List", func(ctx context.Context) { - testPod = e2epod.NewPodClient(f).CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod) endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -688,7 +688,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager framework.ExpectNoError(err) defer conn.Close() - resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{}) + resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{}) framework.ExpectNoError(err) for _, podResource := range resp.PodResources { @@ -703,14 +703,14 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }) ginkgo.It("should succeed to start the pod", func(ctx context.Context) { - testPod = e2epod.NewPodClient(f).CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod) // it no taste to verify NUMA pinning when the node has only one NUMA node if !*isMultiNUMASupported { return } - verifyMemoryPinning(testPod, allNUMANodes) + verifyMemoryPinning(ctx, testPod, allNUMANodes) }) }) }) diff --git a/test/e2e_node/mirror_pod_grace_period_test.go b/test/e2e_node/mirror_pod_grace_period_test.go index df86c288ae4..c6afd41365c 100644 --- a/test/e2e_node/mirror_pod_grace_period_test.go +++ b/test/e2e_node/mirror_pod_grace_period_test.go @@ -39,7 +39,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Context("when create a mirror pod ", func() { var ns, podPath, staticPodName, mirrorPodName string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns = f.Namespace.Name staticPodName = "graceful-pod-" + string(uuid.NewUUID()) mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName @@ -51,14 +51,14 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be running") - gomega.Eventually(func() error { - return 
checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodRunning(ctx, f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) ginkgo.It("mirror pod termination should satisfy grace period when static pod is deleted [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID @@ -69,14 +69,14 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be running for grace period") - gomega.Consistently(func() error { - return checkMirrorPodRunningWithUID(f.ClientSet, mirrorPodName, ns, uid) + gomega.Consistently(ctx, func(ctx context.Context) error { + return checkMirrorPodRunningWithUID(ctx, f.ClientSet, mirrorPodName, ns, uid) }, 19*time.Second, 200*time.Millisecond).Should(gomega.BeNil()) }) ginkgo.It("mirror pod termination should satisfy grace period when static pod is updated [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID @@ -86,17 +86,17 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be running for grace period") - gomega.Consistently(func() error { - return checkMirrorPodRunningWithUID(f.ClientSet, mirrorPodName, ns, uid) + gomega.Consistently(ctx, func(ctx context.Context) error { + return checkMirrorPodRunningWithUID(ctx, f.ClientSet, mirrorPodName, ns, uid) }, 19*time.Second, 200*time.Millisecond).Should(gomega.BeNil()) ginkgo.By("wait for the mirror pod to be updated") - gomega.Eventually(func() error { - return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodRecreatedAndRunning(ctx, f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) ginkgo.By("check the mirror pod container image is updated") - pod, err = f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(len(pod.Spec.Containers), 1) framework.ExpectEqual(pod.Spec.Containers[0].Image, image) @@ -104,7 +104,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { ginkgo.It("should update a static pod when the static pod is updated multiple times during the graceful termination period [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID @@ -120,18 +120,18 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be updated") - gomega.Eventually(func() error { - return checkMirrorPodRecreatedAndRunning(f.ClientSet, 
mirrorPodName, ns, uid) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodRecreatedAndRunning(ctx, f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) ginkgo.By("check the mirror pod container image is updated") - pod, err = f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(len(pod.Spec.Containers), 1) framework.ExpectEqual(pod.Spec.Containers[0].Image, image) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("delete the static pod") err := deleteStaticPod(podPath, staticPodName, ns) if !os.IsNotExist(err) { @@ -139,8 +139,8 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { } ginkgo.By("wait for the mirror pod to disappear") - gomega.Eventually(func() error { - return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodDisappear(ctx, f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) }) @@ -184,8 +184,8 @@ spec: return err } -func checkMirrorPodRunningWithUID(cl clientset.Interface, name, namespace string, oUID types.UID) error { - pod, err := cl.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func checkMirrorPodRunningWithUID(ctx context.Context, cl clientset.Interface, name, namespace string, oUID types.UID) error { + pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index 7f7b6a977b3..41153603b01 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -47,7 +47,7 @@ var _ = SIGDescribe("MirrorPod", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Context("when create a mirror pod ", func() { var ns, podPath, staticPodName, mirrorPodName string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ns = f.Namespace.Name staticPodName = "static-pod-" + string(uuid.NewUUID()) mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName @@ -60,8 +60,8 @@ var _ = SIGDescribe("MirrorPod", func() { framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be running") - gomega.Eventually(func() error { - return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodRunning(ctx, f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) /* @@ -71,7 +71,7 @@ var _ = SIGDescribe("MirrorPod", func() { */ ginkgo.It("should be updated when static pod updated [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID @@ -81,12 +81,12 @@ var _ = SIGDescribe("MirrorPod", func() { framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be updated") - gomega.Eventually(func() error { - return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid) + gomega.Eventually(ctx, func(ctx 
context.Context) error { + return checkMirrorPodRecreatedAndRunning(ctx, f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) ginkgo.By("check the mirror pod container image is updated") - pod, err = f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(len(pod.Spec.Containers), 1) framework.ExpectEqual(pod.Spec.Containers[0].Image, image) @@ -98,17 +98,17 @@ var _ = SIGDescribe("MirrorPod", func() { */ ginkgo.It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID ginkgo.By("delete the mirror pod with grace period 30s") - err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, *metav1.NewDeleteOptions(30)) + err = f.ClientSet.CoreV1().Pods(ns).Delete(ctx, mirrorPodName, *metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be recreated") - gomega.Eventually(func() error { - return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodRecreatedAndRunning(ctx, f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) /* @@ -118,27 +118,27 @@ var _ = SIGDescribe("MirrorPod", func() { */ ginkgo.It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID ginkgo.By("delete the mirror pod with grace period 0s") - err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, *metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(ns).Delete(ctx, mirrorPodName, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be recreated") - gomega.Eventually(func() error { - return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodRecreatedAndRunning(ctx, f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("delete the static pod") err := deleteStaticPod(podPath, staticPodName, ns) framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to disappear") - gomega.Eventually(func() error { - return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodDisappear(ctx, f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) }) @@ -163,8 +163,8 @@ var _ = SIGDescribe("MirrorPod", func() { framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be running") - gomega.Eventually(func() error { - return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) + gomega.Eventually(ctx, 
func(ctx context.Context) error { + return checkMirrorPodRunning(ctx, f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) ginkgo.By("delete the pod manifest from disk") @@ -177,13 +177,13 @@ var _ = SIGDescribe("MirrorPod", func() { framework.ExpectNoError(err) ginkgo.By("mirror pod should restart with count 1") - gomega.Eventually(func() error { - return checkMirrorPodRunningWithRestartCount(2*time.Second, 2*time.Minute, f.ClientSet, mirrorPodName, ns, 1) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodRunningWithRestartCount(ctx, 2*time.Second, 2*time.Minute, f.ClientSet, mirrorPodName, ns, 1) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) ginkgo.By("mirror pod should stay running") - gomega.Consistently(func() error { - return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) + gomega.Consistently(ctx, func(ctx context.Context) error { + return checkMirrorPodRunning(ctx, f.ClientSet, mirrorPodName, ns) }, time.Second*30, time.Second*4).Should(gomega.BeNil()) ginkgo.By("delete the static pod") @@ -191,8 +191,8 @@ var _ = SIGDescribe("MirrorPod", func() { framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to disappear") - gomega.Eventually(func() error { - return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodDisappear(ctx, f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) }) @@ -233,16 +233,16 @@ func deleteStaticPod(dir, name, namespace string) error { return os.Remove(file) } -func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error { - _, err := cl.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func checkMirrorPodDisappear(ctx context.Context, cl clientset.Interface, name, namespace string) error { + _, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil } return goerrors.New("pod not disappear") } -func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error { - pod, err := cl.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func checkMirrorPodRunning(ctx context.Context, cl clientset.Interface, name, namespace string) error { + pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } @@ -254,14 +254,14 @@ func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error return fmt.Errorf("expected the mirror pod %q with container %q to be running (got containers=%v)", name, pod.Status.ContainerStatuses[i].Name, pod.Status.ContainerStatuses[i].State) } } - return validateMirrorPod(cl, pod) + return validateMirrorPod(ctx, cl, pod) } -func checkMirrorPodRunningWithRestartCount(interval time.Duration, timeout time.Duration, cl clientset.Interface, name, namespace string, count int32) error { +func checkMirrorPodRunningWithRestartCount(ctx context.Context, interval time.Duration, timeout time.Duration, cl clientset.Interface, name, namespace string, count int32) error { var pod *v1.Pod var err error - err = wait.PollImmediate(interval, timeout, func() (bool, error) { - pod, err = cl.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + err = wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + pod, err = 
cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } @@ -286,11 +286,11 @@ func checkMirrorPodRunningWithRestartCount(interval time.Duration, timeout time. if err != nil { return err } - return validateMirrorPod(cl, pod) + return validateMirrorPod(ctx, cl, pod) } -func checkMirrorPodRecreatedAndRunning(cl clientset.Interface, name, namespace string, oUID types.UID) error { - pod, err := cl.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +func checkMirrorPodRecreatedAndRunning(ctx context.Context, cl clientset.Interface, name, namespace string, oUID types.UID) error { + pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } @@ -300,10 +300,10 @@ func checkMirrorPodRecreatedAndRunning(cl clientset.Interface, name, namespace s if pod.Status.Phase != v1.PodRunning { return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase) } - return validateMirrorPod(cl, pod) + return validateMirrorPod(ctx, cl, pod) } -func validateMirrorPod(cl clientset.Interface, mirrorPod *v1.Pod) error { +func validateMirrorPod(ctx context.Context, cl clientset.Interface, mirrorPod *v1.Pod) error { hash, ok := mirrorPod.Annotations[kubetypes.ConfigHashAnnotationKey] if !ok || hash == "" { return fmt.Errorf("expected mirror pod %q to have a hash annotation", mirrorPod.Name) @@ -326,7 +326,7 @@ func validateMirrorPod(cl clientset.Interface, mirrorPod *v1.Pod) error { if len(mirrorPod.OwnerReferences) != 1 { return fmt.Errorf("expected mirror pod %q to have a single owner reference: got %d", mirrorPod.Name, len(mirrorPod.OwnerReferences)) } - node, err := cl.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := cl.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to fetch test node: %v", err) } diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index fd53b0bfa45..db36ee3544d 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -68,7 +68,7 @@ var _ = SIGDescribe("Node Container Manager [Serial]", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() { ginkgo.It("sets up the node and runs the test", func(ctx context.Context) { - framework.ExpectNoError(runTest(f)) + framework.ExpectNoError(runTest(ctx, f)) }) }) }) @@ -159,14 +159,14 @@ func convertSharesToWeight(shares int64) int64 { return 1 + ((shares-2)*9999)/262142 } -func runTest(f *framework.Framework) error { +func runTest(ctx context.Context, f *framework.Framework) error { var oldCfg *kubeletconfig.KubeletConfiguration subsystems, err := cm.GetCgroupSubsystems() if err != nil { return err } // Get current kubelet configuration - oldCfg, err = getCurrentKubeletConfig() + oldCfg, err = getCurrentKubeletConfig(ctx) if err != nil { return err } @@ -191,7 +191,7 @@ func runTest(f *framework.Framework) error { startKubelet := stopKubelet() // wait until the kubelet health check will fail - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, time.Minute, time.Second).Should(gomega.BeFalse()) @@ 
-201,7 +201,7 @@ func runTest(f *framework.Framework) error { startKubelet() // wait until the kubelet health check will succeed - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func(ctx context.Context) bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) } @@ -218,7 +218,7 @@ func runTest(f *framework.Framework) error { startKubelet := stopKubelet() // wait until the kubelet health check will fail - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, time.Minute, time.Second).Should(gomega.BeFalse()) @@ -228,7 +228,7 @@ func runTest(f *framework.Framework) error { startKubelet() // wait until the kubelet health check will succeed - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) @@ -251,8 +251,8 @@ func runTest(f *framework.Framework) error { // TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings. // The node may not have updated capacity and allocatable yet, so check that it happens eventually. - gomega.Eventually(func() error { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + gomega.Eventually(ctx, func(ctx context.Context) error { + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return err } @@ -306,7 +306,7 @@ func runTest(f *framework.Framework) error { return fmt.Errorf("Unexpected memory allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableMemory, schedulerAllocatable[v1.ResourceMemory], capacity[v1.ResourceMemory]) } return nil - }, time.Minute, 5*time.Second).Should(gomega.BeNil()) + }, time.Minute, 5*time.Second).Should(gomega.Succeed()) cgroupPath := "" if currentConfig.CgroupDriver == "systemd" { diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go index 679defa4044..b5ba5c00969 100644 --- a/test/e2e_node/node_perf_test.go +++ b/test/e2e_node/node_perf_test.go @@ -48,14 +48,14 @@ func makeNodePerfPod(w workloads.NodePerfWorkload) *v1.Pod { } } -func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration) { +func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration) { if cfg != nil { // Update the Kubelet configuration. ginkgo.By("Stopping the kubelet") startKubelet := stopKubelet() // wait until the kubelet health check will fail - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, time.Minute, time.Second).Should(gomega.BeFalse()) @@ -65,14 +65,14 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfigur startKubelet() // wait until the kubelet health check will succeed - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) } // Wait for the Kubelet to be ready. 
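The hunks above and below all follow the same shape: the ctx that Ginkgo hands to the spec body (or to BeforeEach/JustBeforeEach) is passed straight into gomega.Eventually and into any helper that polls or talks to the API server. A minimal sketch of that pattern, not part of the patch, assuming the usual e2e_node imports (context, time, gomega) and the suite's existing kubeletHealthCheck helper; waitForKubeletHealthy is a hypothetical name:

    // Sketch only: passing ctx as the first argument to Eventually makes the
    // retry loop stop as soon as the spec is aborted.
    func waitForKubeletHealthy(ctx context.Context) {
        gomega.Eventually(ctx, func() bool {
            // The health probe itself needs no context; only the poll loop does.
            return kubeletHealthCheck(kubeletHealthCheckURL)
        }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
    }
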
- gomega.Eventually(func() bool { - nodes, err := e2enode.TotalReady(f.ClientSet) + gomega.Eventually(ctx, func(ctx context.Context) bool { + nodes, err := e2enode.TotalReady(ctx, f.ClientSet) framework.ExpectNoError(err) return nodes == 1 }, time.Minute, time.Second).Should(gomega.BeTrue()) @@ -89,22 +89,22 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { newCfg *kubeletconfig.KubeletConfiguration pod *v1.Pod ) - ginkgo.JustBeforeEach(func() { + ginkgo.JustBeforeEach(func(ctx context.Context) { err := wl.PreTestExec() framework.ExpectNoError(err) - oldCfg, err = getCurrentKubeletConfig() + oldCfg, err = getCurrentKubeletConfig(ctx) framework.ExpectNoError(err) newCfg, err = wl.KubeletConfig(oldCfg) framework.ExpectNoError(err) - setKubeletConfig(f, newCfg) + setKubeletConfig(ctx, f, newCfg) }) - cleanup := func() { + cleanup := func(ctx context.Context) { gp := int64(0) delOpts := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } - e2epod.NewPodClient(f).DeleteSync(pod.Name, delOpts, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, delOpts, e2epod.DefaultPodDeletionTimeout) // We are going to give some more time for the CPU manager to do any clean // up it needs to do now that the pod has been deleted. Otherwise we may @@ -117,18 +117,18 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { ginkgo.By("running the post test exec from the workload") err := wl.PostTestExec() framework.ExpectNoError(err) - setKubeletConfig(f, oldCfg) + setKubeletConfig(ctx, f, oldCfg) } - runWorkload := func() { + runWorkload := func(ctx context.Context) { ginkgo.By("running the workload and waiting for success") // Make the pod for the workload. pod = makeNodePerfPod(wl) // Create the pod. - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) // Wait for pod success. 
// but avoid using WaitForSuccess because we want the container logs upon failure #109295 - podErr := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), wl.Timeout(), + podErr := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), wl.Timeout(), func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed: @@ -140,7 +140,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { } }, ) - podLogs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + podLogs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) framework.ExpectNoError(err) if podErr != nil { framework.Logf("dumping pod logs due to pod error detected: \n%s", podLogs) @@ -153,11 +153,11 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { gomega.Expect(podErr).To(gomega.Succeed(), "wait for pod %q to succeed", pod.Name) } - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("ensure environment has enough CPU + Memory to run") minimumRequiredCPU := resource.MustParse("15") minimumRequiredMemory := resource.MustParse("48Gi") - localNodeCap := getLocalNode(f).Status.Allocatable + localNodeCap := getLocalNode(ctx, f).Status.Allocatable cpuCap := localNodeCap[v1.ResourceCPU] memCap := localNodeCap[v1.ResourceMemory] if cpuCap.Cmp(minimumRequiredCPU) == -1 { @@ -174,7 +174,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { }) ginkgo.It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func(ctx context.Context) { ginkgo.DeferCleanup(cleanup) - runWorkload() + runWorkload(ctx) }) }) ginkgo.Context("Run node performance testing with pre-defined workloads", func() { @@ -183,7 +183,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { }) ginkgo.It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func(ctx context.Context) { ginkgo.DeferCleanup(cleanup) - runWorkload() + runWorkload(ctx) }) }) ginkgo.Context("Run node performance testing with pre-defined workloads", func() { @@ -192,7 +192,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { }) ginkgo.It("TensorFlow workload", func(ctx context.Context) { ginkgo.DeferCleanup(cleanup) - runWorkload() + runWorkload(ctx) }) }) }) diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index f6ab45c7b9d..7bc9d9613bb 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -103,7 +103,7 @@ var _ = SIGDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector] [Seri var lookback time.Duration var eventListOptions metav1.ListOptions - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Calculate Lookback duration") var err error @@ -189,7 +189,7 @@ current-context: local-context eventListOptions = metav1.ListOptions{FieldSelector: selector} ginkgo.By("Create config map for the node problem detector") - _, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), &v1.ConfigMap{ + _, err = c.CoreV1().ConfigMaps(ns).Create(ctx, &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: configName}, Data: map[string]string{ path.Base(configFile): config, @@ -201,7 +201,7 @@ current-context: local-context ginkgo.By("Create the 
node problem detector") hostPathType := new(v1.HostPathType) *hostPathType = v1.HostPathFileOrCreate - pod := e2epod.NewPodClient(f).CreateSync(&v1.Pod{ + pod := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -401,47 +401,47 @@ current-context: local-context } ginkgo.By(fmt.Sprintf("Wait for %d temp events generated", test.tempEvents)) - gomega.Eventually(func() error { - return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.tempEvents, tempReason, tempMessage) + gomega.Eventually(ctx, func(ctx context.Context) error { + return verifyEvents(ctx, c.CoreV1().Events(eventNamespace), eventListOptions, test.tempEvents, tempReason, tempMessage) }, pollTimeout, pollInterval).Should(gomega.Succeed()) ginkgo.By(fmt.Sprintf("Wait for %d total events generated", test.totalEvents)) - gomega.Eventually(func() error { - return verifyTotalEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents) + gomega.Eventually(ctx, func(ctx context.Context) error { + return verifyTotalEvents(ctx, c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents) }, pollTimeout, pollInterval).Should(gomega.Succeed()) ginkgo.By(fmt.Sprintf("Make sure only %d total events generated", test.totalEvents)) - gomega.Consistently(func() error { - return verifyTotalEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents) + gomega.Consistently(ctx, func(ctx context.Context) error { + return verifyTotalEvents(ctx, c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents) }, pollConsistent, pollInterval).Should(gomega.Succeed()) ginkgo.By(fmt.Sprintf("Make sure node condition %q is set", condition)) - gomega.Eventually(func() error { - return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage) + gomega.Eventually(ctx, func(ctx context.Context) error { + return verifyNodeCondition(ctx, c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage) }, pollTimeout, pollInterval).Should(gomega.Succeed()) ginkgo.By(fmt.Sprintf("Make sure node condition %q is stable", condition)) - gomega.Consistently(func() error { - return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage) + gomega.Consistently(ctx, func(ctx context.Context) error { + return verifyNodeCondition(ctx, c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage) }, pollConsistent, pollInterval).Should(gomega.Succeed()) } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if ginkgo.CurrentSpecReport().Failed() && framework.TestContext.DumpLogsOnFailure { ginkgo.By("Get node problem detector log") - log, err := e2epod.GetPodLogs(c, ns, name, name) + log, err := e2epod.GetPodLogs(ctx, c, ns, name, name) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) framework.Logf("Node Problem Detector logs:\n %s", log) } ginkgo.By("Delete the node problem detector") - e2epod.NewPodClient(f).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0)) + framework.ExpectNoError(e2epod.NewPodClient(f).Delete(ctx, name, *metav1.NewDeleteOptions(0))) ginkgo.By("Wait for the node problem detector to disappear") - gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodToDisappear(ctx, c, ns, name, labels.Everything(), pollInterval, 
pollTimeout)).To(gomega.Succeed()) ginkgo.By("Delete the config map") - c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, metav1.DeleteOptions{}) + framework.ExpectNoError(c.CoreV1().ConfigMaps(ns).Delete(ctx, configName, metav1.DeleteOptions{})) ginkgo.By("Clean up the events") - gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), *metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed()) + gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(ctx, *metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed()) ginkgo.By("Clean up the node condition") patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition)) - c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(context.TODO()) + c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(ctx) }) }) }) @@ -463,8 +463,8 @@ func injectLog(file string, timestamp time.Time, log string, num int) error { } // verifyEvents verifies there are num specific events generated with given reason and message. -func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error { - events, err := e.List(context.TODO(), options) +func verifyEvents(ctx context.Context, e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error { + events, err := e.List(ctx, options) if err != nil { return err } @@ -482,8 +482,8 @@ func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, nu } // verifyTotalEvents verifies there are num events in total. 
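The helper conversions in this file follow one rule: drop context.TODO() and accept the caller's ctx, so a cancelled spec also cancels the in-flight List/Get. A condensed sketch under that assumption, using the file's existing coreclientset and metav1 aliases; verifyEventCount is a hypothetical name, not part of the patch:

    // Sketch only: the same conversion applied to a hypothetical verifier.
    func verifyEventCount(ctx context.Context, e coreclientset.EventInterface, options metav1.ListOptions, want int) error {
        events, err := e.List(ctx, options) // was e.List(context.TODO(), options)
        if err != nil {
            return err
        }
        if len(events.Items) != want {
            return fmt.Errorf("got %d events, want %d", len(events.Items), want)
        }
        return nil
    }
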
-func verifyTotalEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int) error { - events, err := e.List(context.TODO(), options) +func verifyTotalEvents(ctx context.Context, e coreclientset.EventInterface, options metav1.ListOptions, num int) error { + events, err := e.List(ctx, options) if err != nil { return err } @@ -498,8 +498,8 @@ func verifyTotalEvents(e coreclientset.EventInterface, options metav1.ListOption } // verifyNodeCondition verifies specific node condition is generated, if reason and message are empty, they will not be checked -func verifyNodeCondition(n coreclientset.NodeInterface, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error { - node, err := n.Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) +func verifyNodeCondition(ctx context.Context, n coreclientset.NodeInterface, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error { + node, err := n.Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e_node/node_shutdown_linux_test.go b/test/e2e_node/node_shutdown_linux_test.go index eacdc0b861d..ddd586a8f3e 100644 --- a/test/e2e_node/node_shutdown_linux_test.go +++ b/test/e2e_node/node_shutdown_linux_test.go @@ -65,7 +65,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut nodeShutdownGracePeriod = 30 * time.Second ) - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.FeatureGates = map[string]bool{ string(features.GracefulNodeShutdown): true, string(features.PodDisruptionConditions): true, @@ -74,9 +74,9 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut initialConfig.ShutdownGracePeriod = metav1.Duration{Duration: nodeShutdownGracePeriod} }) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Wait for the node to be ready") - waitForNodeReady() + waitForNodeReady(ctx) }) ginkgo.AfterEach(func() { @@ -86,7 +86,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut }) ginkgo.It("should add the DisruptionTarget pod failure condition to the evicted pods", func(ctx context.Context) { - nodeName := getNodeName(f) + nodeName := getNodeName(ctx, f) nodeSelector := fields.Set{ "spec.nodeName": nodeName, }.AsSelector().String() @@ -100,7 +100,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut defer cancel() ginkgo.By("reating batch pods") - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) list, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{ FieldSelector: nodeSelector, @@ -168,7 +168,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut nodeShutdownGracePeriodCriticalPods = 10 * time.Second ) - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.FeatureGates = map[string]bool{ string(features.GracefulNodeShutdown): true, string(features.GracefulNodeShutdownBasedOnPodPriority): false, @@ -177,19 +177,19 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut initialConfig.ShutdownGracePeriodCriticalPods = 
metav1.Duration{Duration: nodeShutdownGracePeriodCriticalPods} }) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Wait for the node to be ready") - waitForNodeReady() + waitForNodeReady(ctx) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Emitting Shutdown false signal; cancelling the shutdown") err := emitSignalPrepareForShutdown(false) framework.ExpectNoError(err) }) ginkgo.It("should be able to gracefully shutdown pods with various grace periods", func(ctx context.Context) { - nodeName := getNodeName(f) + nodeName := getNodeName(ctx, f) nodeSelector := fields.Set{ "spec.nodeName": nodeName, }.AsSelector().String() @@ -203,21 +203,21 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut } ginkgo.By("Creating batch pods") - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) - list, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ + list, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{ FieldSelector: nodeSelector, }) framework.ExpectNoError(err) framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected") - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { defer ginkgo.GinkgoRecover() w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(context.TODO(), options) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(ctx, options) }, } @@ -252,8 +252,8 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut ginkgo.By("Verifying that non-critical pods are shutdown") // Not critical pod should be shutdown - gomega.Eventually(func() error { - list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ + gomega.Eventually(ctx, func(ctx context.Context) error { + list, err = e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{ FieldSelector: nodeSelector, }) if err != nil { @@ -275,12 +275,12 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut } } return nil - }, podStatusUpdateTimeout, pollInterval).Should(gomega.BeNil()) + }, podStatusUpdateTimeout, pollInterval).Should(gomega.Succeed()) ginkgo.By("Verifying that all pods are shutdown") // All pod should be shutdown - gomega.Eventually(func() error { - list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ + gomega.Eventually(ctx, func(ctx context.Context) error { + list, err = e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{ FieldSelector: nodeSelector, }) if err != nil { @@ -298,7 +298,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut }, // Critical pod starts shutdown after (nodeShutdownGracePeriod-nodeShutdownGracePeriodCriticalPods) podStatusUpdateTimeout+(nodeShutdownGracePeriod-nodeShutdownGracePeriodCriticalPods), - pollInterval).Should(gomega.BeNil()) + pollInterval).Should(gomega.Succeed()) }) @@ -306,24 +306,24 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut ginkgo.By("Emitting Shutdown signal") err := emitSignalPrepareForShutdown(true) framework.ExpectNoError(err) - gomega.Eventually(func() error { - isReady := getNodeReadyStatus(f) + gomega.Eventually(ctx, func(ctx context.Context) error { + isReady := getNodeReadyStatus(ctx, f) if isReady { return fmt.Errorf("node did not 
become shutdown as expected") } return nil - }, nodeStatusUpdateTimeout, pollInterval).Should(gomega.BeNil()) + }, nodeStatusUpdateTimeout, pollInterval).Should(gomega.Succeed()) ginkgo.By("Emitting Shutdown false signal; cancelling the shutdown") err = emitSignalPrepareForShutdown(false) framework.ExpectNoError(err) - gomega.Eventually(func() error { - isReady := getNodeReadyStatus(f) + gomega.Eventually(ctx, func(ctx context.Context) error { + isReady := getNodeReadyStatus(ctx, f) if !isReady { return fmt.Errorf("node did not recover as expected") } return nil - }, nodeStatusUpdateTimeout, pollInterval).Should(gomega.BeNil()) + }, nodeStatusUpdateTimeout, pollInterval).Should(gomega.Succeed()) }) ginkgo.It("after restart dbus, should be able to gracefully shutdown", func(ctx context.Context) { @@ -346,13 +346,13 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut err = emitSignalPrepareForShutdown(true) framework.ExpectNoError(err) - gomega.Eventually(func() error { - isReady := getNodeReadyStatus(f) + gomega.Eventually(ctx, func(ctx context.Context) error { + isReady := getNodeReadyStatus(ctx, f) if isReady { return fmt.Errorf("node did not become shutdown as expected") } return nil - }, nodeStatusUpdateTimeout, pollInterval).Should(gomega.BeNil()) + }, nodeStatusUpdateTimeout, pollInterval).Should(gomega.Succeed()) }) }) @@ -370,7 +370,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut customClassC = getPriorityClass("custom-class-c", 1000) ) - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.FeatureGates = map[string]bool{ string(features.GracefulNodeShutdown): true, string(features.GracefulNodeShutdownBasedOnPodPriority): true, @@ -400,25 +400,25 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut }) - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Wait for the node to be ready") - waitForNodeReady() + waitForNodeReady(ctx) customClasses := []*schedulingv1.PriorityClass{customClassA, customClassB, customClassC} for _, customClass := range customClasses { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.Background(), customClass, metav1.CreateOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, customClass, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err) } } - gomega.Eventually(func() error { + gomega.Eventually(ctx, func(ctx context.Context) error { for _, customClass := range customClasses { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Get(context.Background(), customClass.Name, metav1.GetOptions{}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Get(ctx, customClass.Name, metav1.GetOptions{}) if err != nil { return err } } return nil - }, priorityClassesCreateTimeout, pollInterval).Should(gomega.BeNil()) + }, priorityClassesCreateTimeout, pollInterval).Should(gomega.Succeed()) }) ginkgo.AfterEach(func() { @@ -428,7 +428,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut }) ginkgo.It("should be able to gracefully shutdown pods with various grace periods", func(ctx context.Context) { - nodeName := getNodeName(f) + nodeName := getNodeName(ctx, f) nodeSelector := fields.Set{ "spec.nodeName": nodeName, }.AsSelector().String() @@ 
-472,9 +472,9 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut } ginkgo.By("Creating batch pods") - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) - list, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ + list, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{ FieldSelector: nodeSelector, }) framework.ExpectNoError(err) @@ -494,8 +494,8 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut ginkgo.By("Verifying that pods are shutdown") for _, step := range downSteps { - gomega.Eventually(func() error { - list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ + gomega.Eventually(ctx, func(ctx context.Context) error { + list, err = e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{ FieldSelector: nodeSelector, }) if err != nil { @@ -526,7 +526,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut } } return nil - }, podStatusUpdateTimeout, pollInterval).Should(gomega.BeNil()) + }, podStatusUpdateTimeout, pollInterval).Should(gomega.Succeed()) } ginkgo.By("should have state file") @@ -602,8 +602,8 @@ func emitSignalPrepareForShutdown(b bool) error { return conn.Emit("/org/freedesktop/login1", "org.freedesktop.login1.Manager.PrepareForShutdown", b) } -func getNodeReadyStatus(f *framework.Framework) bool { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func getNodeReadyStatus(ctx context.Context, f *framework.Framework) bool { + nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err) // Assuming that there is only one node, because this is a node e2e test. framework.ExpectEqual(len(nodeList.Items), 1) diff --git a/test/e2e_node/os_label_rename_test.go b/test/e2e_node/os_label_rename_test.go index a1eaf2a4f42..0c2c38b6de4 100644 --- a/test/e2e_node/os_label_rename_test.go +++ b/test/e2e_node/os_label_rename_test.go @@ -43,9 +43,9 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("Kubelet", func() { ginkgo.It("should reconcile the OS and Arch labels when restarted", func(ctx context.Context) { - node := getLocalNode(f) - e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS) - e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH) + node := getLocalNode(ctx, f) + e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS) + e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH) ginkgo.By("killing and restarting kubelet") // Let's kill the kubelet @@ -58,16 +58,16 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu framework.ExpectNoError(err) // Restart kubelet startKubelet() - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, framework.RestartNodeReadyAgainTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, framework.RestartNodeReadyAgainTimeout)) // If this happens right, node should have all the labels reset properly - err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute) + err = waitForNodeLabels(ctx, f.ClientSet.CoreV1(), node.Name, 5*time.Minute) framework.ExpectNoError(err) }) ginkgo.It("should reconcile the OS and Arch labels when running", func(ctx 
context.Context) { - node := getLocalNode(f) - e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS) - e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH) + node := getLocalNode(ctx, f) + e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS) + e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH) // Update labels newNode := node.DeepCopy() @@ -75,19 +75,19 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu newNode.Labels[v1.LabelArchStable] = "dummyArch" _, _, err := nodeutil.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode) framework.ExpectNoError(err) - err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute) + err = waitForNodeLabels(ctx, f.ClientSet.CoreV1(), node.Name, 5*time.Minute) framework.ExpectNoError(err) }) }) }) // waitForNodeLabels waits for the nodes to be have appropriate labels. -func waitForNodeLabels(c v1core.CoreV1Interface, nodeName string, timeout time.Duration) error { +func waitForNodeLabels(ctx context.Context, c v1core.CoreV1Interface, nodeName string, timeout time.Duration) error { ginkgo.By(fmt.Sprintf("Waiting for node %v to have appropriate labels", nodeName)) // Poll until the node has desired labels - return wait.Poll(framework.Poll, timeout, - func() (bool, error) { - node, err := c.Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + return wait.PollWithContext(ctx, framework.Poll, timeout, + func(ctx context.Context) (bool, error) { + node, err := c.Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e_node/pids_test.go b/test/e2e_node/pids_test.go index a59d772aeb2..f981113683a 100644 --- a/test/e2e_node/pids_test.go +++ b/test/e2e_node/pids_test.go @@ -90,7 +90,7 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod { func runPodPidsLimitTests(f *framework.Framework) { ginkgo.It("should set pids.max for Pod", func(ctx context.Context) { ginkgo.By("by creating a G pod") - pod := e2epod.NewPodClient(f).Create(&v1.Pod{ + pod := e2epod.NewPodClient(f).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -113,8 +113,8 @@ func runPodPidsLimitTests(f *framework.Framework) { podUID := string(pod.UID) ginkgo.By("checking if the expected pids settings were applied") verifyPod := makePodToVerifyPids("pod"+podUID, resource.MustParse("1024")) - e2epod.NewPodClient(f).Create(verifyPod) - err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, verifyPod) + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, verifyPod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) } @@ -124,7 +124,7 @@ var _ = SIGDescribe("PodPidsLimit [Serial]", func() { f := framework.NewDefaultFramework("pids-limit-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("With config updated with pids limits", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.PodPidsLimit = int64(1024) }) runPodPidsLimitTests(f) diff --git a/test/e2e_node/pod_conditions_test.go b/test/e2e_node/pod_conditions_test.go index 3fd4d64725a..343c695ab9f 100644 --- 
a/test/e2e_node/pod_conditions_test.go +++ b/test/e2e_node/pod_conditions_test.go @@ -48,7 +48,7 @@ var _ = SIGDescribe("Pod conditions managed by Kubelet", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Context("including PodHasNetwork condition [Serial] [Feature:PodHasNetwork]", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.FeatureGates = map[string]bool{ string(features.PodHasNetworkCondition): true, } @@ -67,8 +67,8 @@ var _ = SIGDescribe("Pod conditions managed by Kubelet", func() { }) }) -func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, checkPodHasNetwork bool) func() { - return func() { +func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, checkPodHasNetwork bool) func(ctx context.Context) { + return func(ctx context.Context) { ginkgo.By("creating a pod whose sandbox creation is blocked due to a missing volume") p := webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers) @@ -89,7 +89,7 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec }, } - p = e2epod.NewPodClient(f).Create(p) + p = e2epod.NewPodClient(f).Create(ctx, p) ginkgo.By("waiting until kubelet has started trying to set up the pod and started to fail") @@ -99,9 +99,9 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec "involvedObject.namespace": f.Namespace.Name, "reason": events.FailedMountVolume, }.AsSelector().String() - e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, "MountVolume.SetUp failed for volume", framework.PodEventTimeout) + framework.ExpectNoError(e2eevents.WaitTimeoutForEvent(ctx, f.ClientSet, f.Namespace.Name, eventSelector, "MountVolume.SetUp failed for volume", framework.PodEventTimeout)) - p, err := e2epod.NewPodClient(f).Get(context.TODO(), p.Name, metav1.GetOptions{}) + p, err := e2epod.NewPodClient(f).Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("checking pod condition for a pod whose sandbox creation is blocked") @@ -135,14 +135,14 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec } } -func runPodReadyConditionsTest(f *framework.Framework, hasInitContainers, checkPodHasNetwork bool) func() { - return func() { +func runPodReadyConditionsTest(f *framework.Framework, hasInitContainers, checkPodHasNetwork bool) func(ctx context.Context) { + return func(ctx context.Context) { ginkgo.By("creating a pod that successfully comes up in a ready/running state") - p := e2epod.NewPodClient(f).Create(webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers)) - e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout) + p := e2epod.NewPodClient(f).Create(ctx, webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers)) + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)) - p, err := e2epod.NewPodClient(f).Get(context.TODO(), p.Name, metav1.GetOptions{}) + p, err := e2epod.NewPodClient(f).Get(ctx, p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) isReady, err := testutils.PodRunningReady(p) framework.ExpectNoError(err) diff --git a/test/e2e_node/pod_hostnamefqdn_test.go 
b/test/e2e_node/pod_hostnamefqdn_test.go index 5ec64646368..6c8e7314af7 100644 --- a/test/e2e_node/pod_hostnamefqdn_test.go +++ b/test/e2e_node/pod_hostnamefqdn_test.go @@ -85,7 +85,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"} output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, pod.ObjectMeta.Name)} // Create Pod - e2eoutput.TestContainerOutput(f, "shortname only", pod, 0, output) + e2eoutput.TestContainerOutput(ctx, f, "shortname only", pod, 0, output) }) /* @@ -102,7 +102,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"} output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, pod.ObjectMeta.Name)} // Create Pod - e2eoutput.TestContainerOutput(f, "shortname only", pod, 0, output) + e2eoutput.TestContainerOutput(ctx, f, "shortname only", pod, 0, output) }) /* @@ -121,7 +121,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", pod.ObjectMeta.Name, subdomain, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, hostFQDN)} // Create Pod - e2eoutput.TestContainerOutput(f, "shortname and fqdn", pod, 0, output) + e2eoutput.TestContainerOutput(ctx, f, "shortname and fqdn", pod, 0, output) }) /* @@ -146,7 +146,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { framework.ExpectEqual(len(hostFQDN) < 65, true, fmt.Sprintf("The FQDN of the Pod cannot be longer than 64 characters, requested %s which is %d characters long.", hostFQDN, len(hostFQDN))) output := []string{fmt.Sprintf("%s;%s;", hostFQDN, hostFQDN)} // Create Pod - e2eoutput.TestContainerOutput(f, "fqdn and fqdn", pod, 0, output) + e2eoutput.TestContainerOutput(ctx, f, "fqdn and fqdn", pod, 0, output) }) /* @@ -161,7 +161,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { ginkgo.It("a pod configured to set FQDN as hostname will remain in Pending "+ "state generating FailedCreatePodSandBox events when the FQDN is "+ - "longer than 64 bytes", func() { + "longer than 64 bytes", func(ctx context.Context) { // 55 characters for name plus -.t.svc.cluster.local is way more than 64 bytes pod := testPod("hostfqdnveryveryveryverylongforfqdntobemorethan64bytes") pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"} @@ -172,7 +172,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { setHostnameAsFQDN := true pod.Spec.SetHostnameAsFQDN = &setHostnameAsFQDN // Create Pod - launchedPod := e2epod.NewPodClient(f).Create(pod) + launchedPod := e2epod.NewPodClient(f).Create(ctx, pod) // Ensure we delete pod ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) @@ -182,9 +182,9 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { "to construct FQDN from pod hostname and cluster domain, FQDN " framework.Logf("Waiting for Pod to generate FailedCreatePodSandBox event.") // Wait for event with reason FailedCreatePodSandBox - expectSandboxFailureEvent(f, launchedPod, expectedMessage) + expectSandboxFailureEvent(ctx, f, launchedPod, expectedMessage) // Check Pod is in Pending Phase - err := checkPodIsPending(f, launchedPod.ObjectMeta.Name, launchedPod.ObjectMeta.Namespace) + err := checkPodIsPending(ctx, f, 
launchedPod.ObjectMeta.Name, launchedPod.ObjectMeta.Namespace) framework.ExpectNoError(err) }) @@ -192,25 +192,25 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { // expectSandboxFailureEvent polls for an event with reason "FailedCreatePodSandBox" containing the // expected message string. -func expectSandboxFailureEvent(f *framework.Framework, pod *v1.Pod, msg string) { +func expectSandboxFailureEvent(ctx context.Context, f *framework.Framework, pod *v1.Pod, msg string) { eventSelector := fields.Set{ "involvedObject.kind": "Pod", "involvedObject.name": pod.Name, "involvedObject.namespace": f.Namespace.Name, "reason": events.FailedCreatePodSandBox, }.AsSelector().String() - framework.ExpectNoError(e2eevents.WaitTimeoutForEvent( + framework.ExpectNoError(e2eevents.WaitTimeoutForEvent(ctx, f.ClientSet, f.Namespace.Name, eventSelector, msg, framework.PodEventTimeout)) } -func checkPodIsPending(f *framework.Framework, podName, namespace string) error { +func checkPodIsPending(ctx context.Context, f *framework.Framework, podName, namespace string) error { c := f.ClientSet // we call this function after we saw event failing to create Pod, hence // pod has already been created and it should be in Pending status. Giving // 30 seconds to fetch the pod to avoid failing for transient issues getting // pods. fetchPodTimeout := 30 * time.Second - return e2epod.WaitForPodCondition(c, namespace, podName, "Failed to Create Pod", fetchPodTimeout, func(pod *v1.Pod) (bool, error) { + return e2epod.WaitForPodCondition(ctx, c, namespace, podName, "Failed to Create Pod", fetchPodTimeout, func(pod *v1.Pod) (bool, error) { // We are looking for the pod to be scheduled and in Pending state if pod.Status.Phase == v1.PodPending { for _, cond := range pod.Status.Conditions { diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go index 3c36da372c3..f39aa59d729 100644 --- a/test/e2e_node/podresources_test.go +++ b/test/e2e_node/podresources_test.go @@ -136,8 +136,8 @@ func logPodResources(podIdx int, pr *kubeletpodresourcesv1.PodResources) { type podResMap map[string]map[string]kubeletpodresourcesv1.ContainerResources -func getPodResources(cli kubeletpodresourcesv1.PodResourcesListerClient) podResMap { - resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{}) +func getPodResources(ctx context.Context, cli kubeletpodresourcesv1.PodResourcesListerClient) podResMap { + resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{}) framework.ExpectNoError(err) res := make(map[string]map[string]kubeletpodresourcesv1.ContainerResources) @@ -164,10 +164,10 @@ func newTestPodData() *testPodData { } } -func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podDesc) { +func (tpd *testPodData) createPodsForTest(ctx context.Context, f *framework.Framework, podReqs []podDesc) { for _, podReq := range podReqs { pod := makePodResourcesTestPod(podReq) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) framework.Logf("created pod %s", podReq.podName) tpd.PodMap[podReq.podName] = pod @@ -175,17 +175,17 @@ func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podD } /* deletePodsForTest clean up all the pods run for a testcase. 
Must ensure proper cleanup */ -func (tpd *testPodData) deletePodsForTest(f *framework.Framework) { - deletePodsAsync(f, tpd.PodMap) +func (tpd *testPodData) deletePodsForTest(ctx context.Context, f *framework.Framework) { + deletePodsAsync(ctx, f, tpd.PodMap) } /* deletePod removes pod during a test. Should do a best-effort clean up */ -func (tpd *testPodData) deletePod(f *framework.Framework, podName string) { +func (tpd *testPodData) deletePod(ctx context.Context, f *framework.Framework, podName string) { _, ok := tpd.PodMap[podName] if !ok { return } - deletePodSyncByName(f, podName) + deletePodSyncByName(ctx, f, podName) delete(tpd.PodMap, podName) } @@ -250,11 +250,11 @@ func matchPodDescWithResources(expected []podDesc, found podResMap) error { return nil } -func expectPodResources(offset int, cli kubeletpodresourcesv1.PodResourcesListerClient, expected []podDesc) { - gomega.EventuallyWithOffset(1+offset, func() error { - found := getPodResources(cli) +func expectPodResources(ctx context.Context, offset int, cli kubeletpodresourcesv1.PodResourcesListerClient, expected []podDesc) { + gomega.EventuallyWithOffset(1+offset, ctx, func(ctx context.Context) error { + found := getPodResources(ctx, cli) return matchPodDescWithResources(expected, found) - }, time.Minute, 10*time.Second).Should(gomega.BeNil()) + }, time.Minute, 10*time.Second).Should(gomega.Succeed()) } func filterOutDesc(descs []podDesc, name string) []podDesc { @@ -268,7 +268,7 @@ func filterOutDesc(descs []podDesc, name string) []podDesc { return ret } -func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData) { +func podresourcesListTests(ctx context.Context, f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData) { var tpd *testPodData var found podResMap @@ -281,7 +281,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod } ginkgo.By("checking the output when no pods are present") - found = getPodResources(cli) + found = getPodResources(ctx, cli) gomega.ExpectWithOffset(1, found).To(gomega.HaveLen(expectedBasePods), "base pod expectation mismatch") tpd = newTestPodData() @@ -296,9 +296,9 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod cntName: "cnt-00", }, } - tpd.createPodsForTest(f, expected) - expectPodResources(1, cli, expected) - tpd.deletePodsForTest(f) + tpd.createPodsForTest(ctx, f, expected) + expectPodResources(ctx, 1, cli, expected) + tpd.deletePodsForTest(ctx, f) tpd = newTestPodData() ginkgo.By("checking the output when only a subset of pods require resources") @@ -352,9 +352,9 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod } } - tpd.createPodsForTest(f, expected) - expectPodResources(1, cli, expected) - tpd.deletePodsForTest(f) + tpd.createPodsForTest(ctx, f, expected) + expectPodResources(ctx, 1, cli, expected) + tpd.deletePodsForTest(ctx, f) tpd = newTestPodData() ginkgo.By("checking the output when creating pods which require resources between calls") @@ -396,8 +396,8 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod } } - tpd.createPodsForTest(f, expected) - expectPodResources(1, cli, expected) + tpd.createPodsForTest(ctx, f, expected) + expectPodResources(ctx, 1, cli, expected) if sd != nil { extra = podDesc{ @@ -416,13 +416,13 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod } - tpd.createPodsForTest(f, []podDesc{ + 
tpd.createPodsForTest(ctx, f, []podDesc{ extra, }) expected = append(expected, extra) - expectPodResources(1, cli, expected) - tpd.deletePodsForTest(f) + expectPodResources(ctx, 1, cli, expected) + tpd.deletePodsForTest(ctx, f) tpd = newTestPodData() ginkgo.By("checking the output when deleting pods which require resources between calls") @@ -476,13 +476,13 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod }, } } - tpd.createPodsForTest(f, expected) - expectPodResources(1, cli, expected) + tpd.createPodsForTest(ctx, f, expected) + expectPodResources(ctx, 1, cli, expected) - tpd.deletePod(f, "pod-01") + tpd.deletePod(ctx, f, "pod-01") expectedPostDelete := filterOutDesc(expected, "pod-01") - expectPodResources(1, cli, expectedPostDelete) - tpd.deletePodsForTest(f) + expectPodResources(ctx, 1, cli, expectedPostDelete) + tpd.deletePodsForTest(ctx, f) tpd = newTestPodData() ginkgo.By("checking the output when pods request non integral CPUs") @@ -511,15 +511,15 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod } } - tpd.createPodsForTest(f, expected) - expectPodResources(1, cli, expected) - tpd.deletePodsForTest(f) + tpd.createPodsForTest(ctx, f, expected) + expectPodResources(ctx, 1, cli, expected) + tpd.deletePodsForTest(ctx, f) } -func podresourcesGetAllocatableResourcesTests(cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) { +func podresourcesGetAllocatableResourcesTests(ctx context.Context, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) { ginkgo.By("checking the devices known to the kubelet") - resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{}) + resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{}) framework.ExpectNoErrorWithOffset(1, err) devs := resp.GetDevices() allocatableCPUs := cpuset.NewCPUSetInt64(resp.GetCpuIds()...) @@ -565,9 +565,9 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P }) ginkgo.Context("with CPU manager Static policy", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // this is a very rough check. We just want to rule out system that does NOT have enough resources - _, cpuAlloc, _ := getLocalNodeCPUDetails(f) + _, cpuAlloc, _ := getLocalNodeCPUDetails(ctx, f) if cpuAlloc < minCoreCount { e2eskipper.Skipf("Skipping CPU Manager tests since the CPU allocatable < %d", minCoreCount) @@ -576,7 +576,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P // empty context to apply kubelet config changes ginkgo.Context("", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { // Set the CPU Manager policy to static. 
initialConfig.CPUManagerPolicy = string(cpumanager.PolicyStatic) @@ -593,10 +593,10 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P framework.ExpectNoError(err) configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile) - sd := setupSRIOVConfigOrFail(f, configMap) + sd := setupSRIOVConfigOrFail(ctx, f, configMap) ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd) - waitForSRIOVResources(f, sd) + waitForSRIOVResources(ctx, f, sd) endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -605,12 +605,12 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P framework.ExpectNoError(err) defer conn.Close() - waitForSRIOVResources(f, sd) + waitForSRIOVResources(ctx, f, sd) ginkgo.By("checking List()") - podresourcesListTests(f, cli, sd) + podresourcesListTests(ctx, f, cli, sd) ginkgo.By("checking GetAllocatableResources()") - podresourcesGetAllocatableResourcesTests(cli, sd, onlineCPUs, reservedSystemCPUs) + podresourcesGetAllocatableResourcesTests(ctx, cli, sd, onlineCPUs, reservedSystemCPUs) }) }) }) @@ -622,10 +622,10 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P requireSRIOVDevices() configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile) - sd := setupSRIOVConfigOrFail(f, configMap) + sd := setupSRIOVConfigOrFail(ctx, f, configMap) ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd) - waitForSRIOVResources(f, sd) + waitForSRIOVResources(ctx, f, sd) endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -634,11 +634,11 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P framework.ExpectNoError(err) defer conn.Close() - waitForSRIOVResources(f, sd) + waitForSRIOVResources(ctx, f, sd) // intentionally passing empty cpuset instead of onlineCPUs because with none policy // we should get no allocatable cpus - no exclusively allocatable CPUs, depends on policy static - podresourcesGetAllocatableResourcesTests(cli, sd, cpuset.CPUSet{}, cpuset.CPUSet{}) + podresourcesGetAllocatableResourcesTests(ctx, cli, sd, cpuset.CPUSet{}, cpuset.CPUSet{}) }) }) }) @@ -649,9 +649,9 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P }) ginkgo.Context("with CPU manager Static policy", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // this is a very rough check. We just want to rule out system that does NOT have enough resources - _, cpuAlloc, _ := getLocalNodeCPUDetails(f) + _, cpuAlloc, _ := getLocalNodeCPUDetails(ctx, f) if cpuAlloc < minCoreCount { e2eskipper.Skipf("Skipping CPU Manager tests since the CPU allocatable < %d", minCoreCount) @@ -660,7 +660,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P // empty context to apply kubelet config changes ginkgo.Context("", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { // Set the CPU Manager policy to static. 
initialConfig.CPUManagerPolicy = string(cpumanager.PolicyStatic) @@ -683,8 +683,8 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P framework.ExpectNoError(err) defer conn.Close() - podresourcesListTests(f, cli, nil) - podresourcesGetAllocatableResourcesTests(cli, nil, onlineCPUs, reservedSystemCPUs) + podresourcesListTests(ctx, f, cli, nil) + podresourcesGetAllocatableResourcesTests(ctx, cli, nil, onlineCPUs, reservedSystemCPUs) }) }) }) @@ -700,12 +700,12 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P // intentionally passing empty cpuset instead of onlineCPUs because with none policy // we should get no allocatable cpus - no exclusively allocatable CPUs, depends on policy static - podresourcesGetAllocatableResourcesTests(cli, nil, cpuset.CPUSet{}, cpuset.CPUSet{}) + podresourcesGetAllocatableResourcesTests(ctx, cli, nil, cpuset.CPUSet{}, cpuset.CPUSet{}) }) }) ginkgo.Context("with disabled KubeletPodResourcesGetAllocatable feature gate", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { if initialConfig.FeatureGates == nil { initialConfig.FeatureGates = make(map[string]bool) } @@ -721,7 +721,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P defer conn.Close() ginkgo.By("checking GetAllocatableResources fail if the feature gate is not enabled") - allocatableRes, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{}) + allocatableRes, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{}) framework.Logf("GetAllocatableResources result: %v, err: %v", allocatableRes, err) framework.ExpectError(err, "With feature gate disabled, the call must fail") }) @@ -737,9 +737,9 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P }) ginkgo.Context("with CPU manager Static policy", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // this is a very rough check. We just want to rule out system that does NOT have enough resources - _, cpuAlloc, _ := getLocalNodeCPUDetails(f) + _, cpuAlloc, _ := getLocalNodeCPUDetails(ctx, f) if cpuAlloc < minCoreCount { e2eskipper.Skipf("Skipping CPU Manager tests since the CPU allocatable < %d", minCoreCount) @@ -748,7 +748,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P // empty context to apply kubelet config changes ginkgo.Context("", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { // Set the CPU Manager policy to static. 
initialConfig.CPUManagerPolicy = string(cpumanager.PolicyStatic) @@ -761,10 +761,10 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P }) ginkgo.It("should return proper podresources the same as before the restart of kubelet", func(ctx context.Context) { - dpPod := setupKubeVirtDevicePluginOrFail(f) + dpPod := setupKubeVirtDevicePluginOrFail(ctx, f) ginkgo.DeferCleanup(teardownKubeVirtDevicePluginOrFail, f, dpPod) - waitForKubeVirtResources(f, dpPod) + waitForKubeVirtResources(ctx, f, dpPod) endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -775,7 +775,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P ginkgo.By("checking List and resources kubevirt resource should be without topology") - allocatableResponse, _ := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{}) + allocatableResponse, _ := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{}) for _, dev := range allocatableResponse.GetDevices() { if dev.ResourceName != KubeVirtResourceName { continue @@ -794,11 +794,11 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P } tpd := newTestPodData() - tpd.createPodsForTest(f, []podDesc{ + tpd.createPodsForTest(ctx, f, []podDesc{ desc, }) - expectPodResources(1, cli, []podDesc{desc}) + expectPodResources(ctx, 1, cli, []podDesc{desc}) restartTime := time.Now() ginkgo.By("Restarting Kubelet") @@ -807,8 +807,8 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P // we need to wait for the node to be reported ready before we can safely query // the podresources endpoint again. Otherwise we will have false negatives. 
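The node-readiness wait just below is typical of how these hunks convert Gomega polling: the spec body receives its context from Ginkgo v2, and Eventually gets both that ctx and a ctx-aware function, so an aborted test stops waiting immediately. A minimal generic sketch of that pattern only (checkSomething and the package name are illustrative stand-ins, not framework code):

    package example

    import (
        "context"
        "time"

        "github.com/onsi/ginkgo/v2"
        "github.com/onsi/gomega"
    )

    // checkSomething stands in for a real readiness check that issues API calls with ctx.
    func checkSomething(ctx context.Context) error { return ctx.Err() }

    var _ = ginkgo.Describe("context-aware polling", func() {
        ginkgo.It("stops polling when the spec is interrupted", func(ctx context.Context) {
            // Passing ctx to Eventually and accepting it in the polled function
            // lets Gomega cancel the poll as soon as the test context is done.
            gomega.Eventually(ctx, func(ctx context.Context) error {
                return checkSomething(ctx)
            }, 2*time.Minute, 10*time.Second).Should(gomega.Succeed())
        })
    })
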
ginkgo.By("Wait for node to be ready") - gomega.Eventually(func() bool { - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) + gomega.Eventually(ctx, func() bool { + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) for _, cond := range node.Status.Conditions { if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) { @@ -818,15 +818,15 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P return false }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) - expectPodResources(1, cli, []podDesc{desc}) - tpd.deletePodsForTest(f) + expectPodResources(ctx, 1, cli, []podDesc{desc}) + tpd.deletePodsForTest(ctx, f) }) }) }) }) ginkgo.Context("when querying /metrics", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { // ensure APIs have been called at least once endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -835,10 +835,10 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P framework.ExpectNoError(err) defer conn.Close() - _, err = cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{}) + _, err = cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{}) framework.ExpectNoError(err) - _, err = cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{}) + _, err = cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{}) framework.ExpectNoError(err) }) @@ -861,9 +861,9 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P }) ginkgo.By("Giving the Kubelet time to start up and produce metrics") - gomega.Eventually(getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) + gomega.Eventually(ctx, getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) ginkgo.By("Ensuring the metrics match the expectations a few more times") - gomega.Consistently(getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) + gomega.Consistently(ctx, getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) }) }) @@ -883,18 +883,18 @@ func getOnlineCPUs() (cpuset.CPUSet, error) { return cpuset.Parse(strings.TrimSpace(string(onlineCPUList))) } -func setupKubeVirtDevicePluginOrFail(f *framework.Framework) *v1.Pod { - e2enode.WaitForNodeToBeReady(f.ClientSet, framework.TestContext.NodeName, 5*time.Minute) +func setupKubeVirtDevicePluginOrFail(ctx context.Context, f *framework.Framework) *v1.Pod { + e2enode.WaitForNodeToBeReady(ctx, f.ClientSet, framework.TestContext.NodeName, 5*time.Minute) dp := getKubeVirtDevicePluginPod() dp.Spec.NodeName = framework.TestContext.NodeName ginkgo.By("Create KubeVirt device plugin pod") - dpPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{}) + dpPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(ctx, dp, metav1.CreateOptions{}) framework.ExpectNoError(err) - if err = e2epod.WaitForPodCondition(f.ClientSet, metav1.NamespaceSystem, dp.Name, "Ready", 120*time.Second, testutils.PodRunningReady); err != nil { + if err = e2epod.WaitForPodCondition(ctx, f.ClientSet, metav1.NamespaceSystem, dp.Name, "Ready", 120*time.Second, 
testutils.PodRunningReady); err != nil { framework.Logf("KubeVirt Pod %v took too long to enter running/ready: %v", dp.Name, err) } framework.ExpectNoError(err) @@ -902,16 +902,16 @@ func setupKubeVirtDevicePluginOrFail(f *framework.Framework) *v1.Pod { return dpPod } -func teardownKubeVirtDevicePluginOrFail(f *framework.Framework, pod *v1.Pod) { +func teardownKubeVirtDevicePluginOrFail(ctx context.Context, f *framework.Framework, pod *v1.Pod) { gp := int64(0) deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } ginkgo.By(fmt.Sprintf("Delete KubeVirt device plugin pod %s/%s", pod.Namespace, pod.Name)) - err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions) + err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, deleteOptions) framework.ExpectNoError(err) - waitForAllContainerRemoval(pod.Name, pod.Namespace) + waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace) } func findKubeVirtResource(node *v1.Node) int64 { @@ -927,11 +927,11 @@ func findKubeVirtResource(node *v1.Node) int64 { return 0 } -func waitForKubeVirtResources(f *framework.Framework, pod *v1.Pod) { +func waitForKubeVirtResources(ctx context.Context, f *framework.Framework, pod *v1.Pod) { ginkgo.By("Waiting for kubevirt resources to become available on the local node") - gomega.Eventually(func() bool { - node := getLocalNode(f) + gomega.Eventually(ctx, func(ctx context.Context) bool { + node := getLocalNode(ctx, f) kubeVirtResourceAmount := findKubeVirtResource(node) return kubeVirtResourceAmount != 0 }, 2*time.Minute, framework.Poll).Should(gomega.BeTrue()) @@ -957,10 +957,10 @@ func getKubeVirtDevicePluginPod() *v1.Pod { return p } -func getPodResourcesMetrics() (e2emetrics.KubeletMetrics, error) { +func getPodResourcesMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) { // we are running out of good names, so we need to be unnecessarily specific to avoid clashes ginkgo.By("getting Pod Resources metrics from the metrics API") - return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") + return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, framework.TestContext.NodeName+":10255", "/metrics") } func timelessSampleAtLeast(lower interface{}) types.GomegaMatcher { diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index f4f3315bc8a..b946bfa56b0 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -176,8 +176,8 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { } cgroupsToVerify := []string{burstableCgroup, bestEffortCgroup} pod := makePodToVerifyCgroups(cgroupsToVerify) - e2epod.NewPodClient(f).Create(pod) - err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) }) @@ -194,7 +194,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { podUID string ) ginkgo.By("Creating a Guaranteed pod in Namespace", func() { - guaranteedPod = e2epod.NewPodClient(f).Create(&v1.Pod{ + guaranteedPod = e2epod.NewPodClient(f).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -214,17 +214,17 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.By("Checking if the pod cgroup was created", func() { cgroupsToVerify := 
[]string{"pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) - e2epod.NewPodClient(f).Create(pod) - err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := e2epod.NewPodClient(f).Delete(context.TODO(), guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := e2epod.NewPodClient(f).Delete(ctx, guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("pod" + podUID) - e2epod.NewPodClient(f).Create(pod) - err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) }) @@ -239,7 +239,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { bestEffortPod *v1.Pod ) ginkgo.By("Creating a BestEffort pod in Namespace", func() { - bestEffortPod = e2epod.NewPodClient(f).Create(&v1.Pod{ + bestEffortPod = e2epod.NewPodClient(f).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -259,17 +259,17 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.By("Checking if the pod cgroup was created", func() { cgroupsToVerify := []string{"besteffort/pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) - e2epod.NewPodClient(f).Create(pod) - err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := e2epod.NewPodClient(f).Delete(context.TODO(), bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := e2epod.NewPodClient(f).Delete(ctx, bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID) - e2epod.NewPodClient(f).Create(pod) - err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) }) @@ -284,7 +284,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { burstablePod *v1.Pod ) ginkgo.By("Creating a Burstable pod in Namespace", func() { - burstablePod = e2epod.NewPodClient(f).Create(&v1.Pod{ + burstablePod = e2epod.NewPodClient(f).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -304,17 +304,17 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.By("Checking if the pod cgroup was created", func() { cgroupsToVerify := []string{"burstable/pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) - e2epod.NewPodClient(f).Create(pod) - err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) ginkgo.By("Checking 
if the pod cgroup was deleted", func() { gp := int64(1) - err := e2epod.NewPodClient(f).Delete(context.TODO(), burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := e2epod.NewPodClient(f).Delete(ctx, burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID) - e2epod.NewPodClient(f).Create(pod) - err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + e2epod.NewPodClient(f).Create(ctx, pod) + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) }) diff --git a/test/e2e_node/quota_lsci_test.go b/test/e2e_node/quota_lsci_test.go index 8e9ae737157..ce7fc9a27da 100644 --- a/test/e2e_node/quota_lsci_test.go +++ b/test/e2e_node/quota_lsci_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "path/filepath" "time" @@ -56,7 +57,7 @@ func runOneQuotaTest(f *framework.Framework, quotasRequested bool) { priority = 1 } ginkgo.Context(fmt.Sprintf(testContextFmt, fmt.Sprintf("use quotas for LSCI monitoring (quotas enabled: %v)", quotasRequested)), func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { defer withFeatureGate(LSCIQuotaFeature, quotasRequested)() // TODO: remove hardcoded kubelet volume directory path // framework.TestContext.KubeVolumeDir is currently not populated for node e2e diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index b5af64af376..97d37faafe9 100644 --- a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -370,7 +370,7 @@ func getCadvisorPod() *v1.Pod { } // deletePodsSync deletes a list of pods and block until pods disappear. 
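deletePodsSync, continued just below, shows the other recurring change: a helper takes the caller's ctx and hands it to every goroutine and API call instead of minting context.TODO() internally. A generic sketch of that shape under those assumptions (deleteOne and the package name are placeholders, not framework helpers):

    package example

    import (
        "context"
        "sync"
    )

    // deleteAll fans one shared ctx out to a goroutine per name.
    func deleteAll(ctx context.Context, names []string, deleteOne func(context.Context, string) error) {
        var wg sync.WaitGroup
        for _, name := range names {
            name := name
            wg.Add(1)
            go func() {
                defer wg.Done()
                // Using the caller's ctx rather than context.TODO() means an
                // aborted test also cancels every in-flight delete.
                _ = deleteOne(ctx, name)
            }()
        }
        wg.Wait()
    }
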
-func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { +func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod) { var wg sync.WaitGroup for i := range pods { pod := pods[i] @@ -379,12 +379,12 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { defer ginkgo.GinkgoRecover() defer wg.Done() - err := e2epod.NewPodClient(f).Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30)) + err := e2epod.NewPodClient(f).Delete(ctx, pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30)) if apierrors.IsNotFound(err) { framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err) } - gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), + gomega.Expect(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), 30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred()) }() } diff --git a/test/e2e_node/resource_metrics_test.go b/test/e2e_node/resource_metrics_test.go index 271fa3700e3..4cdb74b4c6c 100644 --- a/test/e2e_node/resource_metrics_test.go +++ b/test/e2e_node/resource_metrics_test.go @@ -47,16 +47,16 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() { f := framework.NewDefaultFramework("resource-metrics") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("when querying /resource/metrics", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("Creating test pods to measure their resource usage") numRestarts := int32(1) pods := getSummaryTestPods(f, numRestarts, pod0, pod1) - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) ginkgo.By("restarting the containers to ensure container metrics are still being gathered after a container is restarted") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func(ctx context.Context) error { for _, pod := range pods { - err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts) + err := verifyPodRestartCount(ctx, f, pod.Name, len(pod.Spec.Containers), numRestarts) if err != nil { return err } @@ -69,7 +69,7 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() { }) ginkgo.It("should report resource usage through the resource metrics api", func(ctx context.Context) { ginkgo.By("Fetching node so we can match against an appropriate memory limit") - node := getLocalNode(f) + node := getLocalNode(ctx, f) memoryCapacity := node.Status.Capacity["memory"] memoryLimit := memoryCapacity.Value() @@ -108,30 +108,30 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() { }), }) ginkgo.By("Giving pods a minute to start up and produce metrics") - gomega.Eventually(getResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) + gomega.Eventually(ctx, getResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) ginkgo.By("Ensuring the metrics match the expectations a few more times") - gomega.Consistently(getResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) + gomega.Consistently(ctx, getResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Deleting test pods") var zero int64 = 0 - e2epod.NewPodClient(f).DeleteSync(pod0, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute) - 
e2epod.NewPodClient(f).DeleteSync(pod1, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, pod0, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute) + e2epod.NewPodClient(f).DeleteSync(ctx, pod1, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute) if !ginkgo.CurrentSpecReport().Failed() { return } if framework.TestContext.DumpLogsOnFailure { - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) } ginkgo.By("Recording processes in system cgroups") - recordSystemCgroupProcesses() + recordSystemCgroupProcesses(ctx) }) }) }) -func getResourceMetrics() (e2emetrics.KubeletMetrics, error) { +func getResourceMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) { ginkgo.By("getting stable resource metrics API") - return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource") + return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, framework.TestContext.NodeName+":10255", "/metrics/resource") } func nodeID(element interface{}) string { diff --git a/test/e2e_node/resource_usage_test.go b/test/e2e_node/resource_usage_test.go index 05c07d075ba..71432389c03 100644 --- a/test/e2e_node/resource_usage_test.go +++ b/test/e2e_node/resource_usage_test.go @@ -51,18 +51,18 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() { f := framework.NewDefaultFramework("resource-usage") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { - om = e2ekubelet.NewRuntimeOperationMonitor(f.ClientSet) + ginkgo.BeforeEach(func(ctx context.Context) { + om = e2ekubelet.NewRuntimeOperationMonitor(ctx, f.ClientSet) // The test collects resource usage from a standalone Cadvisor pod. // The Cadvsior of Kubelet has a housekeeping interval of 10s, which is too long to // show the resource usage spikes. But changing its interval increases the overhead // of kubelet. Hence we use a Cadvisor pod. 
- e2epod.NewPodClient(f).CreateSync(getCadvisorPod()) + e2epod.NewPodClient(f).CreateSync(ctx, getCadvisorPod()) rc = NewResourceCollector(containerStatsPollingPeriod) }) - ginkgo.AfterEach(func() { - result := om.GetLatestRuntimeOperationErrorRate() + ginkgo.AfterEach(func(ctx context.Context) { + result := om.GetLatestRuntimeOperationErrorRate(ctx) framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result)) }) @@ -90,10 +90,10 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() { ginkgo.It(desc, func(ctx context.Context) { testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) - runResourceUsageTest(f, rc, itArg) + runResourceUsageTest(ctx, f, rc, itArg) // Log and verify resource usage - logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true) + logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true) }) } }) @@ -120,10 +120,10 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() { ginkgo.It(desc, func(ctx context.Context) { testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) - runResourceUsageTest(f, rc, itArg) + runResourceUsageTest(ctx, f, rc, itArg) // Log and verify resource usage - logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false) + logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false) }) } }) @@ -140,7 +140,7 @@ func (rt *resourceTest) getTestName() string { } // runResourceUsageTest runs the resource usage test -func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg resourceTest) { +func runResourceUsageTest(ctx context.Context, f *framework.Framework, rc *ResourceCollector, testArg resourceTest) { const ( // The monitoring time for one test monitoringTime = 10 * time.Minute @@ -157,7 +157,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg ginkgo.DeferCleanup(rc.Stop) ginkgo.By("Creating a batch of Pods") - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) // wait for a while to let the node be steady time.Sleep(sleepAfterCreatePods) @@ -173,7 +173,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg // for the current test duration, but we should reclaim the // entries if we plan to monitor longer (e.g., 8 hours). deadline := time.Now().Add(monitoringTime) - for time.Now().Before(deadline) { + for time.Now().Before(deadline) && ctx.Err() == nil { timeLeft := time.Until(deadline) framework.Logf("Still running...%v left", timeLeft) if timeLeft < reportingPeriod { @@ -181,15 +181,15 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg } else { time.Sleep(reportingPeriod) } - logPods(f.ClientSet) + logPods(ctx, f.ClientSet) } ginkgo.By("Reporting overall resource usage") - logPods(f.ClientSet) + logPods(ctx, f.ClientSet) } // logAndVerifyResource prints the resource usage as perf data and verifies whether resource usage satisfies the limit. 
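The monitoring loop above now also checks ctx.Err(), so a cancelled test stops sleeping and logging right away instead of running out the full deadline. Roughly this shape, sketched with illustrative names (monitorUntil and logOnce are not framework functions):

    package example

    import (
        "context"
        "time"
    )

    // monitorUntil keeps logging until the deadline, but returns early once the
    // test context is cancelled.
    func monitorUntil(ctx context.Context, deadline time.Time, period time.Duration, logOnce func(context.Context)) {
        for time.Now().Before(deadline) && ctx.Err() == nil {
            logOnce(ctx)
            select {
            case <-ctx.Done():
                return
            case <-time.After(period):
            }
        }
    }
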
-func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimits e2ekubelet.ContainersCPUSummary, +func logAndVerifyResource(ctx context.Context, f *framework.Framework, rc *ResourceCollector, cpuLimits e2ekubelet.ContainersCPUSummary, memLimits e2ekubelet.ResourceUsagePerContainer, testInfo map[string]string, isVerify bool) { nodeName := framework.TestContext.NodeName @@ -214,12 +214,12 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi // Verify resource usage if isVerify { - verifyMemoryLimits(f.ClientSet, memLimits, usagePerNode) + verifyMemoryLimits(ctx, f.ClientSet, memLimits, usagePerNode) verifyCPULimits(cpuLimits, cpuSummaryPerNode) } } -func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) { +func verifyMemoryLimits(ctx context.Context, c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) { if expected == nil { return } @@ -242,7 +242,7 @@ func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsage } if len(nodeErrs) > 0 { errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", "))) - heapStats, err := e2ekubelet.GetKubeletHeapStats(c, nodeName) + heapStats, err := e2ekubelet.GetKubeletHeapStats(ctx, c, nodeName) if err != nil { framework.Logf("Unable to get heap stats from %q", nodeName) } else { @@ -289,9 +289,9 @@ func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet } } -func logPods(c clientset.Interface) { +func logPods(ctx context.Context, c clientset.Interface) { nodeName := framework.TestContext.NodeName - podList, err := e2ekubelet.GetKubeletRunningPods(c, nodeName) + podList, err := e2ekubelet.GetKubeletRunningPods(ctx, c, nodeName) if err != nil { framework.Logf("Unable to retrieve kubelet pods for node %v", nodeName) } diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go index 98217f6b032..96d90252f97 100644 --- a/test/e2e_node/restart_test.go +++ b/test/e2e_node/restart_test.go @@ -43,9 +43,9 @@ type podCondition func(pod *v1.Pod) (bool, error) // waitForPodsCondition waits for `podCount` number of pods to match a specific pod condition within a timeout duration. // If the timeout is hit, it returns the list of currently running pods. 
-func waitForPodsCondition(f *framework.Framework, podCount int, timeout time.Duration, condition podCondition) (runningPods []*v1.Pod) { +func waitForPodsCondition(ctx context.Context, f *framework.Framework, podCount int, timeout time.Duration, condition podCondition) (runningPods []*v1.Pod) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { - podList, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{}) + podList, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list pods on node: %v", err) continue @@ -91,12 +91,12 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { ginkgo.It("should recover from ip leak", func(ctx context.Context) { pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test") ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods))) - createBatchPodWithRateControl(f, pods, podCreationInterval) + createBatchPodWithRateControl(ctx, f, pods, podCreationInterval) ginkgo.DeferCleanup(deletePodsSync, f, pods) // Give the node some time to stabilize, assume pods that enter RunningReady within // startTimeout fit on the node and the node is now saturated. - runningPods := waitForPodsCondition(f, podCount, startTimeout, testutils.PodRunningReadyOrSucceeded) + runningPods := waitForPodsCondition(ctx, f, podCount, startTimeout, testutils.PodRunningReadyOrSucceeded) if len(runningPods) < minPods { framework.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods) } @@ -105,7 +105,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { ginkgo.By(fmt.Sprintf("Killing container runtime iteration %d", i)) // Wait for container runtime to be running var pid int - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) if err != nil { return err @@ -128,7 +128,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { } ginkgo.By("Checking currently Running/Ready pods") - postRestartRunningPods := waitForPodsCondition(f, len(runningPods), recoverTimeout, testutils.PodRunningReadyOrSucceeded) + postRestartRunningPods := waitForPodsCondition(ctx, f, len(runningPods), recoverTimeout, testutils.PodRunningReadyOrSucceeded) if len(postRestartRunningPods) == 0 { framework.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak") } @@ -156,10 +156,10 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { preRestartPodCount := 2 ginkgo.By(fmt.Sprintf("creating %d RestartAlways pods on node", preRestartPodCount)) restartAlwaysPods := newTestPods(preRestartPodCount, false, imageutils.GetPauseImageName(), "restart-dbus-test") - createBatchPodWithRateControl(f, restartAlwaysPods, podCreationInterval) + createBatchPodWithRateControl(ctx, f, restartAlwaysPods, podCreationInterval) ginkgo.DeferCleanup(deletePodsSync, f, restartAlwaysPods) - allPods := waitForPodsCondition(f, preRestartPodCount, startTimeout, testutils.PodRunningReadyOrSucceeded) + allPods := waitForPodsCondition(ctx, f, preRestartPodCount, startTimeout, testutils.PodRunningReadyOrSucceeded) if len(allPods) < preRestartPodCount { framework.Failf("Failed to run sufficient restartAlways pods, got %d but expected %d", len(allPods), preRestartPodCount) } @@ -176,8 
+176,8 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { }) ginkgo.By("verifying restartAlways pods stay running", func() { - for start := time.Now(); time.Since(start) < startTimeout; time.Sleep(10 * time.Second) { - postRestartRunningPods := waitForPodsCondition(f, preRestartPodCount, recoverTimeout, testutils.PodRunningReadyOrSucceeded) + for start := time.Now(); time.Since(start) < startTimeout && ctx.Err() == nil; time.Sleep(10 * time.Second) { + postRestartRunningPods := waitForPodsCondition(ctx, f, preRestartPodCount, recoverTimeout, testutils.PodRunningReadyOrSucceeded) if len(postRestartRunningPods) < preRestartPodCount { framework.Failf("fewer pods are running after systemd restart, got %d but expected %d", len(postRestartRunningPods), preRestartPodCount) } @@ -187,10 +187,10 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { ginkgo.By("verifying new pods can be started after a dbus restart") postRestartPodCount := 2 postRestartPods := newTestPods(postRestartPodCount, false, imageutils.GetPauseImageName(), "restart-dbus-test") - createBatchPodWithRateControl(f, postRestartPods, podCreationInterval) + createBatchPodWithRateControl(ctx, f, postRestartPods, podCreationInterval) ginkgo.DeferCleanup(deletePodsSync, f, postRestartPods) - allPods = waitForPodsCondition(f, preRestartPodCount+postRestartPodCount, startTimeout, testutils.PodRunningReadyOrSucceeded) + allPods = waitForPodsCondition(ctx, f, preRestartPodCount+postRestartPodCount, startTimeout, testutils.PodRunningReadyOrSucceeded) if len(allPods) < preRestartPodCount+postRestartPodCount { framework.Failf("Failed to run pods after restarting dbus, got %d but expected %d", len(allPods), preRestartPodCount+postRestartPodCount) } @@ -199,7 +199,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { ginkgo.Context("Kubelet", func() { ginkgo.It("should correctly account for terminated pods after restart", func(ctx context.Context) { - node := getLocalNode(f) + node := getLocalNode(ctx, f) cpus := node.Status.Allocatable[v1.ResourceCPU] numCpus := int((&cpus).Value()) if numCpus < 1 { @@ -223,9 +223,9 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { v1.ResourceCPU: resource.MustParse("950m"), // leave a little room for other workloads } } - createBatchPodWithRateControl(f, restartNeverPods, podCreationInterval) + createBatchPodWithRateControl(ctx, f, restartNeverPods, podCreationInterval) ginkgo.DeferCleanup(deletePodsSync, f, restartNeverPods) - completedPods := waitForPodsCondition(f, podCountRestartNever, startTimeout, testutils.PodSucceeded) + completedPods := waitForPodsCondition(ctx, f, podCountRestartNever, startTimeout, testutils.PodSucceeded) if len(completedPods) < podCountRestartNever { framework.Failf("Failed to run sufficient restartNever pods, got %d but expected %d", len(completedPods), podCountRestartNever) @@ -239,11 +239,11 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { v1.ResourceCPU: resource.MustParse("1"), } } - createBatchPodWithRateControl(f, restartAlwaysPods, podCreationInterval) + createBatchPodWithRateControl(ctx, f, restartAlwaysPods, podCreationInterval) ginkgo.DeferCleanup(deletePodsSync, f, restartAlwaysPods) numAllPods := podCountRestartNever + podCountRestartAlways - allPods := waitForPodsCondition(f, numAllPods, startTimeout, testutils.PodRunningReadyOrSucceeded) + allPods := waitForPodsCondition(ctx, f, numAllPods, startTimeout, testutils.PodRunningReadyOrSucceeded) if len(allPods) < 
numAllPods { framework.Failf("Failed to run sufficient restartAlways pods, got %d but expected %d", len(allPods), numAllPods) } @@ -258,8 +258,8 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { // restart may think these old pods are consuming CPU and we // will get an OutOfCpu error. ginkgo.By("verifying restartNever pods succeed and restartAlways pods stay running") - for start := time.Now(); time.Since(start) < startTimeout; time.Sleep(10 * time.Second) { - postRestartRunningPods := waitForPodsCondition(f, numAllPods, recoverTimeout, testutils.PodRunningReadyOrSucceeded) + for start := time.Now(); time.Since(start) < startTimeout && ctx.Err() == nil; time.Sleep(10 * time.Second) { + postRestartRunningPods := waitForPodsCondition(ctx, f, numAllPods, recoverTimeout, testutils.PodRunningReadyOrSucceeded) if len(postRestartRunningPods) < numAllPods { framework.Failf("less pods are running after node restart, got %d but expected %d", len(postRestartRunningPods), numAllPods) } diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 6157435fe95..8006a03c40d 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -89,8 +89,8 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() { defer os.Remove(configFile) // checkContainerStatus checks whether the container status matches expectation. - checkContainerStatus := func() error { - status, err := container.GetStatus() + checkContainerStatus := func(ctx context.Context) error { + status, err := container.GetStatus(ctx) if err != nil { return fmt.Errorf("failed to get container status: %v", err) } @@ -116,7 +116,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() { } } // Check pod phase - phase, err := container.GetPhase() + phase, err := container.GetPhase(ctx) if err != nil { return fmt.Errorf("failed to get pod phase: %v", err) } @@ -131,15 +131,15 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() { for i := 1; i <= flakeRetry; i++ { var err error ginkgo.By("create the container") - container.Create() + container.Create(ctx) ginkgo.By("check the container status") for start := time.Now(); time.Since(start) < node.ContainerStatusRetryTimeout; time.Sleep(node.ContainerStatusPollInterval) { - if err = checkContainerStatus(); err == nil { + if err = checkContainerStatus(ctx); err == nil { break } } ginkgo.By("delete the container") - container.Delete() + _ = container.Delete(ctx) if err == nil { break } diff --git a/test/e2e_node/runtimeclass_test.go b/test/e2e_node/runtimeclass_test.go index 2cdb85c942b..1dca583b1cf 100644 --- a/test/e2e_node/runtimeclass_test.go +++ b/test/e2e_node/runtimeclass_test.go @@ -114,11 +114,11 @@ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() { PodFixed: getResourceList("200m", "140Mi"), }, } - _, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), rc, metav1.CreateOptions{}) + _, err := f.ClientSet.NodeV1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create RuntimeClass resource") }) ginkgo.By("Creating a Guaranteed pod with which has Overhead defined", func() { - guaranteedPod = e2epod.NewPodClient(f).CreateSync(&v1.Pod{ + guaranteedPod = e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "pod-with-overhead-", Namespace: f.Namespace.Name, @@ -140,8 +140,8 @@ var _ = SIGDescribe("Kubelet PodOverhead handling 
[LinuxOnly]", func() { ginkgo.By("Checking if the pod cgroup was created appropriately", func() { cgroupsToVerify := []string{"pod" + podUID} pod := makePodToVerifyCgroupSize(cgroupsToVerify, "30000", "251658240") - pod = e2epod.NewPodClient(f).Create(pod) - err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + pod = e2epod.NewPodClient(f).Create(ctx, pod) + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) }) diff --git a/test/e2e_node/seccompdefault_test.go b/test/e2e_node/seccompdefault_test.go index 85f2ae86718..32d237aafd9 100644 --- a/test/e2e_node/seccompdefault_test.go +++ b/test/e2e_node/seccompdefault_test.go @@ -39,7 +39,7 @@ var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("with SeccompDefault enabled", func() { - tempSetCurrentKubeletConfig(f, func(cfg *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, cfg *kubeletconfig.KubeletConfiguration) { cfg.SeccompDefault = true }) @@ -63,12 +63,12 @@ var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly ginkgo.It("should use the default seccomp profile when unspecified", func(ctx context.Context) { pod := newPod(nil) - e2eoutput.TestContainerOutput(f, "SeccompDefault", pod, 0, []string{"2"}) + e2eoutput.TestContainerOutput(ctx, f, "SeccompDefault", pod, 0, []string{"2"}) }) ginkgo.It("should use unconfined when specified", func(ctx context.Context) { pod := newPod(&v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}}) - e2eoutput.TestContainerOutput(f, "SeccompDefault-unconfined", pod, 0, []string{"0"}) + e2eoutput.TestContainerOutput(ctx, f, "SeccompDefault-unconfined", pod, 0, []string{"0"}) }) }) }) diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go index 5e8afbe1427..9aff03ab852 100644 --- a/test/e2e_node/security_context_test.go +++ b/test/e2e_node/security_context_test.go @@ -46,7 +46,7 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.Context("[NodeConformance][LinuxOnly] Container PID namespace sharing", func() { ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func(ctx context.Context) { ginkgo.By("Create a pod with isolated PID namespaces.") - e2epod.NewPodClient(f).CreateSync(&v1.Pod{ + e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "isolated-pid-ns-test-pod"}, Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -75,7 +75,7 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("processes in containers sharing a pod namespace should be able to see each other", func(ctx context.Context) { ginkgo.By("Create a pod with shared PID namespace.") - e2epod.NewPodClient(f).CreateSync(&v1.Pod{ + e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"}, Spec: v1.PodSpec{ ShareProcessNamespace: &[]bool{true}[0], @@ -123,20 +123,20 @@ var _ = SIGDescribe("Security Context", func() { }, } } - createAndWaitHostPidPod := func(podName string, hostPID bool) { - podClient.Create(makeHostPidPod(podName, + createAndWaitHostPidPod := func(ctx context.Context, podName string, hostPID bool) { + podClient.Create(ctx, makeHostPidPod(podName, busyboxImage, []string{"sh", "-c", "pidof nginx || true"}, hostPID, )) - 
podClient.WaitForSuccess(podName, framework.PodStartTimeout) + podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout) } nginxPid := "" - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID()) - podClient.CreateSync(makeHostPidPod(nginxPodName, + podClient.CreateSync(ctx, makeHostPidPod(nginxPodName, imageutils.GetE2EImage(imageutils.Nginx), nil, true, @@ -149,8 +149,8 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func(ctx context.Context) { busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID()) - createAndWaitHostPidPod(busyboxPodName, true) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + createAndWaitHostPidPod(ctx, busyboxPodName, true) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } @@ -169,8 +169,8 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func(ctx context.Context) { busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID()) - createAndWaitHostPidPod(busyboxPodName, false) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + createAndWaitHostPidPod(ctx, busyboxPodName, false) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } @@ -203,14 +203,14 @@ var _ = SIGDescribe("Security Context", func() { }, } } - createAndWaitHostIPCPod := func(podName string, hostNetwork bool) { - podClient.Create(makeHostIPCPod(podName, + createAndWaitHostIPCPod := func(ctx context.Context, podName string, hostNetwork bool) { + podClient.Create(ctx, makeHostIPCPod(podName, imageutils.GetE2EImage(imageutils.IpcUtils), []string{"sh", "-c", "ipcs -m | awk '{print $2}'"}, hostNetwork, )) - podClient.WaitForSuccess(podName, framework.PodStartTimeout) + podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout) } hostSharedMemoryID := "" @@ -225,8 +225,8 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func(ctx context.Context) { ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID()) - createAndWaitHostIPCPod(ipcutilsPodName, true) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) + createAndWaitHostIPCPod(ctx, ipcutilsPodName, true) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err) } @@ -240,8 +240,8 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func(ctx context.Context) { ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID()) - createAndWaitHostIPCPod(ipcutilsPodName, false) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) + createAndWaitHostIPCPod(ctx, ipcutilsPodName, false) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) 
if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err) } @@ -283,14 +283,14 @@ var _ = SIGDescribe("Security Context", func() { } } listListeningPortsCommand := []string{"sh", "-c", "netstat -ln"} - createAndWaitHostNetworkPod := func(podName string, hostNetwork bool) { - podClient.Create(makeHostNetworkPod(podName, + createAndWaitHostNetworkPod := func(ctx context.Context, podName string, hostNetwork bool) { + podClient.Create(ctx, makeHostNetworkPod(podName, busyboxImage, listListeningPortsCommand, hostNetwork, )) - podClient.WaitForSuccess(podName, framework.PodStartTimeout) + podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout) } listeningPort := "" @@ -308,8 +308,8 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func(ctx context.Context) { busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID()) - createAndWaitHostNetworkPod(busyboxPodName, true) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + createAndWaitHostNetworkPod(ctx, busyboxPodName, true) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } @@ -322,8 +322,8 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func(ctx context.Context) { busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID()) - createAndWaitHostNetworkPod(busyboxPodName, false) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + createAndWaitHostNetworkPod(ctx, busyboxPodName, false) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go index e4113ae032e..20ca73e3105 100644 --- a/test/e2e_node/summary_test.go +++ b/test/e2e_node/summary_test.go @@ -44,15 +44,15 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() { f := framework.NewDefaultFramework("summary-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("when querying /stats/summary", func() { - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if !ginkgo.CurrentSpecReport().Failed() { return } if framework.TestContext.DumpLogsOnFailure { - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) } ginkgo.By("Recording processes in system cgroups") - recordSystemCgroupProcesses() + recordSystemCgroupProcesses(ctx) }) ginkgo.It("should report resource usage through the stats api", func(ctx context.Context) { const pod0 = "stats-busybox-0" @@ -61,12 +61,12 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() { ginkgo.By("Creating test pods") numRestarts := int32(1) pods := getSummaryTestPods(f, numRestarts, pod0, pod1) - e2epod.NewPodClient(f).CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(ctx, pods) ginkgo.By("restarting the containers to ensure container metrics are still being gathered after a container is restarted") - gomega.Eventually(func() error { + gomega.Eventually(ctx, func() error { for _, pod := 
range pods { - err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts) + err := verifyPodRestartCount(ctx, f, pod.Name, len(pod.Spec.Containers), numRestarts) if err != nil { return err } @@ -83,7 +83,7 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() { maxStatsAge = time.Minute ) ginkgo.By("Fetching node so we can match against an appropriate memory limit") - node := getLocalNode(f) + node := getLocalNode(ctx, f) memoryCapacity := node.Status.Capacity["memory"] memoryLimit := memoryCapacity.Value() fsCapacityBounds := bounded(100*e2evolume.Mb, 10*e2evolume.Tb) @@ -329,9 +329,9 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() { ginkgo.By("Validating /stats/summary") // Give pods a minute to actually start up. - gomega.Eventually(getNodeSummary, 180*time.Second, 15*time.Second).Should(matchExpectations) + gomega.Eventually(ctx, getNodeSummary, 180*time.Second, 15*time.Second).Should(matchExpectations) // Then the summary should match the expectations a few more times. - gomega.Consistently(getNodeSummary, 30*time.Second, 15*time.Second).Should(matchExpectations) + gomega.Consistently(ctx, getNodeSummary, 30*time.Second, 15*time.Second).Should(matchExpectations) }) }) }) @@ -419,8 +419,8 @@ func recent(d time.Duration) types.GomegaMatcher { gomega.BeTemporally("<", time.Now().Add(3*time.Minute)))) } -func recordSystemCgroupProcesses() { - cfg, err := getCurrentKubeletConfig() +func recordSystemCgroupProcesses(ctx context.Context) { + cfg, err := getCurrentKubeletConfig(ctx) if err != nil { framework.Logf("Failed to read kubelet config: %v", err) return diff --git a/test/e2e_node/system_node_critical_test.go b/test/e2e_node/system_node_critical_test.go index 7141a24a945..641d3524dfb 100644 --- a/test/e2e_node/system_node_critical_test.go +++ b/test/e2e_node/system_node_critical_test.go @@ -50,9 +50,9 @@ var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFea }) ginkgo.Context("when create a system-node-critical pod", func() { - tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { diskConsumed := resource.MustParse("200Mi") - summary := eventuallyGetSummary() + summary := eventuallyGetSummary(ctx) availableBytes := *(summary.Node.Fs.AvailableBytes) initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))} initialConfig.EvictionMinimumReclaim = map[string]string{} @@ -63,7 +63,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFea var staticPodName, mirrorPodName, podPath string ns := kubeapi.NamespaceSystem - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("create a static system-node-critical pod") staticPodName = "static-disk-hog-" + string(uuid.NewUUID()) mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName @@ -77,25 +77,25 @@ var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFea gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) ginkgo.By("wait for the mirror pod to be running") - gomega.Eventually(func() error { - return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) - }, time.Minute, time.Second*2).Should(gomega.BeNil()) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodRunning(ctx, f.ClientSet, mirrorPodName, ns) + }, 
time.Minute, time.Second*2).Should(gomega.Succeed()) }) ginkgo.It("should not be evicted upon DiskPressure", func(ctx context.Context) { ginkgo.By("wait for the node to have DiskPressure condition") - gomega.Eventually(func() error { - if hasNodeCondition(f, v1.NodeDiskPressure) { + gomega.Eventually(ctx, func(ctx context.Context) error { + if hasNodeCondition(ctx, f, v1.NodeDiskPressure) { return nil } msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure) framework.Logf(msg) return fmt.Errorf(msg) - }, time.Minute*2, time.Second*4).Should(gomega.BeNil()) + }, time.Minute*2, time.Second*4).Should(gomega.Succeed()) ginkgo.By("check if it's running all the time") - gomega.Consistently(func() error { - err := checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) + gomega.Consistently(ctx, func(ctx context.Context) error { + err := checkMirrorPodRunning(ctx, f.ClientSet, mirrorPodName, ns) if err == nil { framework.Logf("mirror pod %q is running", mirrorPodName) } else { @@ -104,7 +104,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFea return err }, time.Minute*8, time.Second*4).ShouldNot(gomega.HaveOccurred()) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { defer func() { if framework.TestContext.PrepullImages { // The test may cause the prepulled images to be evicted, @@ -117,17 +117,17 @@ var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFea gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) ginkgo.By("wait for the mirror pod to disappear") - gomega.Eventually(func() error { - return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns) - }, time.Minute, time.Second*2).Should(gomega.BeNil()) + gomega.Eventually(ctx, func(ctx context.Context) error { + return checkMirrorPodDisappear(ctx, f.ClientSet, mirrorPodName, ns) + }, time.Minute, time.Second*2).Should(gomega.Succeed()) ginkgo.By("making sure that node no longer has DiskPressure") - gomega.Eventually(func() error { - if hasNodeCondition(f, v1.NodeDiskPressure) { + gomega.Eventually(ctx, func(ctx context.Context) error { + if hasNodeCondition(ctx, f, v1.NodeDiskPressure) { return fmt.Errorf("Conditions haven't returned to normal, node still has DiskPressure") } return nil - }, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil()) + }, pressureDisappearTimeout, evictionPollInterval).Should(gomega.Succeed()) }) }) }) diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go index cee70948769..83385015493 100644 --- a/test/e2e_node/topology_manager_test.go +++ b/test/e2e_node/topology_manager_test.go @@ -290,11 +290,11 @@ func findSRIOVResource(node *v1.Node) (string, int64) { return "", 0 } -func validatePodAlignment(f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) { +func validatePodAlignment(ctx context.Context, f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) { for _, cnt := range pod.Spec.Containers { ginkgo.By(fmt.Sprintf("validating the container %s on Gu pod %s", cnt.Name, pod.Name)) - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", cnt.Name, pod.Name) framework.Logf("got pod logs: %v", logs) @@ -307,13 +307,13 @@ func validatePodAlignment(f *framework.Framework, pod *v1.Pod, envInfo *testEnvI } // validatePodAligmentWithPodScope 
validates whether all pod's CPUs are affined to the same NUMA node. -func validatePodAlignmentWithPodScope(f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) error { +func validatePodAlignmentWithPodScope(ctx context.Context, f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) error { // Mapping between CPU IDs and NUMA node IDs. podsNUMA := make(map[int]int) ginkgo.By(fmt.Sprintf("validate pod scope alignment for %s pod", pod.Name)) for _, cnt := range pod.Spec.Containers { - logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name) + logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name) framework.ExpectNoError(err, "NUMA alignment failed for container [%s] of pod [%s]", cnt.Name, pod.Name) envMap, err := makeEnvMap(logs) framework.ExpectNoError(err, "NUMA alignment failed for container [%s] of pod [%s]", cnt.Name, pod.Name) @@ -336,10 +336,10 @@ func validatePodAlignmentWithPodScope(f *framework.Framework, pod *v1.Pod, envIn return nil } -func runTopologyManagerPolicySuiteTests(f *framework.Framework) { +func runTopologyManagerPolicySuiteTests(ctx context.Context, f *framework.Framework) { var cpuCap, cpuAlloc int64 - cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f) + cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f) ginkgo.By(fmt.Sprintf("checking node CPU capacity (%d) and allocatable CPUs (%d)", cpuCap, cpuAlloc)) // Albeit even the weakest CI machines usually have 2 cpus, let's be extra careful and @@ -349,10 +349,10 @@ func runTopologyManagerPolicySuiteTests(f *framework.Framework) { } ginkgo.By("running a non-Gu pod") - runNonGuPodTest(f, cpuCap) + runNonGuPodTest(ctx, f, cpuCap) ginkgo.By("running a Gu pod") - runGuPodTest(f, 1) + runGuPodTest(ctx, f, 1) // Skip rest of the tests if CPU allocatable < 3. if cpuAlloc < 3 { @@ -360,16 +360,16 @@ func runTopologyManagerPolicySuiteTests(f *framework.Framework) { } ginkgo.By("running multiple Gu and non-Gu pods") - runMultipleGuNonGuPods(f, cpuCap, cpuAlloc) + runMultipleGuNonGuPods(ctx, f, cpuCap, cpuAlloc) ginkgo.By("running a Gu pod requesting multiple CPUs") - runMultipleCPUGuPod(f) + runMultipleCPUGuPod(ctx, f) ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs") - runMultipleCPUContainersGuPod(f) + runMultipleCPUContainersGuPod(ctx, f) ginkgo.By("running multiple Gu pods") - runMultipleGuPods(f) + runMultipleGuPods(ctx, f) } // waitForAllContainerRemoval waits until all the containers on a given pod are really gone. @@ -377,11 +377,11 @@ func runTopologyManagerPolicySuiteTests(f *framework.Framework) { // In these cases, we need to make sure the tests clean up after themselves to make sure each test runs in // a pristine environment. The only way known so far to do that is to introduce this wait. // Worth noting, however, that this makes the test runtime much bigger. 
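For reference, a minimal self-contained sketch (not part of the patch) of the context-propagation pattern these hunks apply: the Ginkgo node takes a context.Context, and that context is handed both to gomega.Eventually, which stops polling once the context is cancelled, and to the polled function itself when its first parameter is a context. The package name and the checkSomething helper below are made up for illustration.

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = ginkgo.It("polls with the spec context", func(ctx context.Context) {
	// Hypothetical check; a real one would make an API call with ctx.
	checkSomething := func(ctx context.Context) error {
		return ctx.Err()
	}
	// Old style: gomega.Eventually(func() error { ... }, time.Minute, 2*time.Second)
	// New style: the spec context bounds the polling and is forwarded to the check.
	gomega.Eventually(ctx, checkSomething, time.Minute, 2*time.Second).Should(gomega.Succeed())
})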
-func waitForAllContainerRemoval(podName, podNS string) { +func waitForAllContainerRemoval(ctx context.Context, podName, podNS string) { rs, _, err := getCRIClient() framework.ExpectNoError(err) - gomega.Eventually(func() bool { - containers, err := rs.ListContainers(context.Background(), &runtimeapi.ContainerFilter{ + gomega.Eventually(ctx, func(ctx context.Context) bool { + containers, err := rs.ListContainers(ctx, &runtimeapi.ContainerFilter{ LabelSelector: map[string]string{ types.KubernetesPodNameLabel: podName, types.KubernetesPodNamespaceLabel: podNS, @@ -394,14 +394,14 @@ func waitForAllContainerRemoval(podName, podNS string) { }, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue()) } -func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) { +func runTopologyManagerPositiveTest(ctx context.Context, f *framework.Framework, numPods int, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) { podMap := make(map[string]*v1.Pod) for podID := 0; podID < numPods; podID++ { podName := fmt.Sprintf("gu-pod-%d", podID) framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = e2epod.NewPodClient(f).CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(ctx, pod) framework.Logf("created pod %s", podName) podMap[podName] = pod } @@ -410,20 +410,20 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr // we can do a menaingful validation only when using the single-numa node policy if envInfo.policy == topologymanager.PolicySingleNumaNode { for _, pod := range podMap { - validatePodAlignment(f, pod, envInfo) + validatePodAlignment(ctx, f, pod, envInfo) } if envInfo.scope == podScopeTopology { for _, pod := range podMap { - err := validatePodAlignmentWithPodScope(f, pod, envInfo) + err := validatePodAlignmentWithPodScope(ctx, f, pod, envInfo) framework.ExpectNoError(err) } } } - deletePodsAsync(f, podMap) + deletePodsAsync(ctx, f, podMap) } -func deletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) { +func deletePodsAsync(ctx context.Context, f *framework.Framework, podMap map[string]*v1.Pod) { var wg sync.WaitGroup for _, pod := range podMap { wg.Add(1) @@ -431,27 +431,27 @@ func deletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) { defer ginkgo.GinkgoRecover() defer wg.Done() - deletePodSyncByName(f, podName) - waitForAllContainerRemoval(podName, podNS) + deletePodSyncByName(ctx, f, podName) + waitForAllContainerRemoval(ctx, podName, podNS) }(pod.Namespace, pod.Name) } wg.Wait() } -func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) { +func runTopologyManagerNegativeTest(ctx context.Context, f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) { podName := "gu-pod" framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = e2epod.NewPodClient(f).Create(pod) - err := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { + pod = e2epod.NewPodClient(f).Create(ctx, pod) + err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase != v1.PodPending { return true, nil } return false, nil }) framework.ExpectNoError(err) - 
pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if pod.Status.Phase != v1.PodFailed { @@ -461,7 +461,7 @@ func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAtt framework.Failf("pod %s failed for wrong reason: %q", pod.Name, pod.Status.Reason) } - deletePodSyncByName(f, pod.Name) + deletePodSyncByName(ctx, f, pod.Name) } func isTopologyAffinityError(pod *v1.Pod) bool { @@ -498,20 +498,20 @@ type sriovData struct { resourceAmount int64 } -func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sriovData { - sd := createSRIOVConfigOrFail(f, configMap) +func setupSRIOVConfigOrFail(ctx context.Context, f *framework.Framework, configMap *v1.ConfigMap) *sriovData { + sd := createSRIOVConfigOrFail(ctx, f, configMap) - e2enode.WaitForNodeToBeReady(f.ClientSet, framework.TestContext.NodeName, 5*time.Minute) + e2enode.WaitForNodeToBeReady(ctx, f.ClientSet, framework.TestContext.NodeName, 5*time.Minute) - sd.pod = createSRIOVPodOrFail(f) + sd.pod = createSRIOVPodOrFail(ctx, f) return sd } -func createSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sriovData { +func createSRIOVConfigOrFail(ctx context.Context, f *framework.Framework, configMap *v1.ConfigMap) *sriovData { var err error ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", metav1.NamespaceSystem, configMap.Name)) - if _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { + if _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(ctx, configMap, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -521,7 +521,7 @@ func createSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *s } serviceAccount := readServiceAccountV1OrDie(data) ginkgo.By(fmt.Sprintf("Creating serviceAccount %v/%v", metav1.NamespaceSystem, serviceAccount.Name)) - if _, err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}); err != nil { + if _, err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(ctx, serviceAccount, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test serviceAccount %s: %v", serviceAccount.Name, err) } @@ -531,15 +531,15 @@ func createSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *s } } -func createSRIOVPodOrFail(f *framework.Framework) *v1.Pod { +func createSRIOVPodOrFail(ctx context.Context, f *framework.Framework) *v1.Pod { dp := getSRIOVDevicePluginPod() dp.Spec.NodeName = framework.TestContext.NodeName ginkgo.By("Create SRIOV device plugin pod") - dpPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{}) + dpPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(ctx, dp, metav1.CreateOptions{}) framework.ExpectNoError(err) - if err = e2epod.WaitForPodCondition(f.ClientSet, metav1.NamespaceSystem, dp.Name, "Ready", 120*time.Second, testutils.PodRunningReady); err != nil { + if err = e2epod.WaitForPodCondition(ctx, f.ClientSet, metav1.NamespaceSystem, dp.Name, "Ready", 120*time.Second, testutils.PodRunningReady); err != nil { framework.Logf("SRIOV Pod %v took too long to enter running/ready: %v", dp.Name, err) } framework.ExpectNoError(err) @@ -549,12 
+549,12 @@ func createSRIOVPodOrFail(f *framework.Framework) *v1.Pod { // waitForSRIOVResources waits until enough SRIOV resources are avaailable, expecting to complete within the timeout. // if exits successfully, updates the sriovData with the resources which were found. -func waitForSRIOVResources(f *framework.Framework, sd *sriovData) { +func waitForSRIOVResources(ctx context.Context, f *framework.Framework, sd *sriovData) { sriovResourceName := "" var sriovResourceAmount int64 ginkgo.By("Waiting for devices to become available on the local node") - gomega.Eventually(func() bool { - node := getLocalNode(f) + gomega.Eventually(ctx, func(ctx context.Context) bool { + node := getLocalNode(ctx, f) sriovResourceName, sriovResourceAmount = findSRIOVResource(node) return sriovResourceAmount > minSriovResource }, 2*time.Minute, framework.Poll).Should(gomega.BeTrue()) @@ -564,7 +564,7 @@ func waitForSRIOVResources(f *framework.Framework, sd *sriovData) { framework.Logf("Detected SRIOV allocatable devices name=%q amount=%d", sd.resourceName, sd.resourceAmount) } -func deleteSRIOVPodOrFail(f *framework.Framework, sd *sriovData) { +func deleteSRIOVPodOrFail(ctx context.Context, f *framework.Framework, sd *sriovData) { var err error gp := int64(0) deleteOptions := metav1.DeleteOptions{ @@ -572,12 +572,12 @@ func deleteSRIOVPodOrFail(f *framework.Framework, sd *sriovData) { } ginkgo.By(fmt.Sprintf("Delete SRIOV device plugin pod %s/%s", sd.pod.Namespace, sd.pod.Name)) - err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(context.TODO(), sd.pod.Name, deleteOptions) + err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(ctx, sd.pod.Name, deleteOptions) framework.ExpectNoError(err) - waitForAllContainerRemoval(sd.pod.Name, sd.pod.Namespace) + waitForAllContainerRemoval(ctx, sd.pod.Name, sd.pod.Namespace) } -func removeSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) { +func removeSRIOVConfigOrFail(ctx context.Context, f *framework.Framework, sd *sriovData) { var err error gp := int64(0) deleteOptions := metav1.DeleteOptions{ @@ -585,25 +585,25 @@ func removeSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) { } ginkgo.By(fmt.Sprintf("Deleting configMap %v/%v", metav1.NamespaceSystem, sd.configMap.Name)) - err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), sd.configMap.Name, deleteOptions) + err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(ctx, sd.configMap.Name, deleteOptions) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Deleting serviceAccount %v/%v", metav1.NamespaceSystem, sd.serviceAccount.Name)) - err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(context.TODO(), sd.serviceAccount.Name, deleteOptions) + err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(ctx, sd.serviceAccount.Name, deleteOptions) framework.ExpectNoError(err) } -func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) { - deleteSRIOVPodOrFail(f, sd) - removeSRIOVConfigOrFail(f, sd) +func teardownSRIOVConfigOrFail(ctx context.Context, f *framework.Framework, sd *sriovData) { + deleteSRIOVPodOrFail(ctx, f, sd) + removeSRIOVConfigOrFail(ctx, f, sd) } -func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs, policy string, numaNodes, coreCount int) { +func runTMScopeResourceAlignmentTestSuite(ctx context.Context, f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs, policy string, numaNodes, coreCount int) { threadsPerCore := 
getSMTLevel() - sd := setupSRIOVConfigOrFail(f, configMap) + sd := setupSRIOVConfigOrFail(ctx, f, configMap) var ctnAttrs, initCtnAttrs []tmCtnAttribute - waitForSRIOVResources(f, sd) + waitForSRIOVResources(ctx, f, sd) envInfo := &testEnvInfo{ numaNodes: numaNodes, @@ -631,7 +631,7 @@ func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1. deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo) numCores := threadsPerCore * coreCount coresReq := fmt.Sprintf("%dm", numCores*1000) @@ -652,7 +652,7 @@ func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1. deviceLimit: "1", }, } - runTopologyManagerNegativeTest(f, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerNegativeTest(ctx, f, ctnAttrs, initCtnAttrs, envInfo) // The Topology Manager with pod scope should calculate how many CPUs it needs to admit a pod basing on two requests: // the maximum of init containers' demand for CPU and sum of app containers' requests for CPU. @@ -693,15 +693,15 @@ func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1. deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo) - teardownSRIOVConfigOrFail(f, sd) + teardownSRIOVConfigOrFail(ctx, f, sd) } -func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriovData, reservedSystemCPUs, policy string, numaNodes, coreCount int) { +func runTopologyManagerNodeAlignmentSuiteTests(ctx context.Context, f *framework.Framework, sd *sriovData, reservedSystemCPUs, policy string, numaNodes, coreCount int) { threadsPerCore := getSMTLevel() - waitForSRIOVResources(f, sd) + waitForSRIOVResources(ctx, f, sd) envInfo := &testEnvInfo{ numaNodes: numaNodes, @@ -724,7 +724,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 1, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 1, ctnAttrs, initCtnAttrs, envInfo) ginkgo.By(fmt.Sprintf("Successfully admit one guaranteed pod with 2 cores, 1 %s device", sd.resourceName)) ctnAttrs = []tmCtnAttribute{ @@ -737,7 +737,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 1, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 1, ctnAttrs, initCtnAttrs, envInfo) if reservedSystemCPUs != "" { // to avoid false negatives, we have put reserved CPUs in such a way there is at least a NUMA node @@ -755,7 +755,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 1, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 1, ctnAttrs, initCtnAttrs, envInfo) } if sd.resourceAmount > 1 { @@ -772,7 +772,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo) ginkgo.By(fmt.Sprintf("Successfully admit two guaranteed pods, each with 2 cores, 1 %s device", sd.resourceName)) ctnAttrs = []tmCtnAttribute{ @@ -785,7 +785,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov 
deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo) // testing more complex conditions require knowledge about the system cpu+bus topology } @@ -811,7 +811,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 1, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 1, ctnAttrs, initCtnAttrs, envInfo) ginkgo.By(fmt.Sprintf("Successfully admit two guaranteed pods, each with two containers, each with 1 core, 1 %s device", sd.resourceName)) ctnAttrs = []tmCtnAttribute{ @@ -832,7 +832,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov deviceLimit: "1", }, } - runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo) ginkgo.By(fmt.Sprintf("Successfully admit two guaranteed pods, each with two containers, both with with 2 cores, one with 1 %s device", sd.resourceName)) ctnAttrs = []tmCtnAttribute{ @@ -850,7 +850,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov cpuLimit: "2000m", }, } - runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo) } // this is the only policy that can guarantee reliable rejects @@ -869,7 +869,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov deviceLimit: "1", }, } - runTopologyManagerNegativeTest(f, ctnAttrs, initCtnAttrs, envInfo) + runTopologyManagerNegativeTest(ctx, f, ctnAttrs, initCtnAttrs, envInfo) } } @@ -885,7 +885,7 @@ func runTopologyManagerTests(f *framework.Framework) { } ginkgo.It("run Topology Manager policy test suite", func(ctx context.Context) { - oldCfg, err = getCurrentKubeletConfig() + oldCfg, err = getCurrentKubeletConfig(ctx) framework.ExpectNoError(err) scope := containerScopeTopology @@ -895,9 +895,9 @@ func runTopologyManagerTests(f *framework.Framework) { framework.Logf("Configuring topology Manager policy to %s", policy) newCfg, _ := configureTopologyManagerInKubelet(oldCfg, policy, scope, nil, 0) - updateKubeletConfig(f, newCfg, true) + updateKubeletConfig(ctx, f, newCfg, true) // Run the tests - runTopologyManagerPolicySuiteTests(f) + runTopologyManagerPolicySuiteTests(ctx, f) } }) @@ -906,10 +906,10 @@ func runTopologyManagerTests(f *framework.Framework) { configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile) - oldCfg, err = getCurrentKubeletConfig() + oldCfg, err = getCurrentKubeletConfig(ctx) framework.ExpectNoError(err) - sd := setupSRIOVConfigOrFail(f, configMap) + sd := setupSRIOVConfigOrFail(ctx, f, configMap) ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd) scope := containerScopeTopology @@ -919,9 +919,9 @@ func runTopologyManagerTests(f *framework.Framework) { framework.Logf("Configuring topology Manager policy to %s", policy) newCfg, reservedSystemCPUs := configureTopologyManagerInKubelet(oldCfg, policy, scope, configMap, numaNodes) - updateKubeletConfig(f, newCfg, true) + updateKubeletConfig(ctx, f, newCfg, true) - runTopologyManagerNodeAlignmentSuiteTests(f, sd, reservedSystemCPUs, policy, numaNodes, coreCount) + runTopologyManagerNodeAlignmentSuiteTests(ctx, f, sd, reservedSystemCPUs, policy, numaNodes, coreCount) } }) @@ -930,22 +930,22 @@ func runTopologyManagerTests(f 
*framework.Framework) { configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile) - oldCfg, err = getCurrentKubeletConfig() + oldCfg, err = getCurrentKubeletConfig(ctx) framework.ExpectNoError(err) policy := topologymanager.PolicySingleNumaNode scope := podScopeTopology newCfg, reservedSystemCPUs := configureTopologyManagerInKubelet(oldCfg, policy, scope, configMap, numaNodes) - updateKubeletConfig(f, newCfg, true) + updateKubeletConfig(ctx, f, newCfg, true) - runTMScopeResourceAlignmentTestSuite(f, configMap, reservedSystemCPUs, policy, numaNodes, coreCount) + runTMScopeResourceAlignmentTestSuite(ctx, f, configMap, reservedSystemCPUs, policy, numaNodes, coreCount) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if oldCfg != nil { // restore kubelet config - updateKubeletConfig(f, oldCfg, true) + updateKubeletConfig(ctx, f, oldCfg, true) } }) } diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index e9735eaace8..311ca6cccac 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -82,8 +82,8 @@ const ( var kubeletHealthCheckURL = fmt.Sprintf("http://127.0.0.1:%d/healthz", ports.KubeletHealthzPort) -func getNodeSummary() (*stats.Summary, error) { - kubeletConfig, err := getCurrentKubeletConfig() +func getNodeSummary(ctx context.Context) (*stats.Summary, error) { + kubeletConfig, err := getCurrentKubeletConfig(ctx) if err != nil { return nil, fmt.Errorf("failed to get current kubelet config") } @@ -114,7 +114,7 @@ func getNodeSummary() (*stats.Summary, error) { return &summary, nil } -func getV1alpha1NodeDevices() (*kubeletpodresourcesv1alpha1.ListPodResourcesResponse, error) { +func getV1alpha1NodeDevices(ctx context.Context) (*kubeletpodresourcesv1alpha1.ListPodResourcesResponse, error) { endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) if err != nil { return nil, fmt.Errorf("Error getting local endpoint: %v", err) @@ -124,7 +124,7 @@ func getV1alpha1NodeDevices() (*kubeletpodresourcesv1alpha1.ListPodResourcesResp return nil, fmt.Errorf("Error getting grpc client: %v", err) } defer conn.Close() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() resp, err := client.List(ctx, &kubeletpodresourcesv1alpha1.ListPodResourcesRequest{}) if err != nil { @@ -133,7 +133,7 @@ func getV1alpha1NodeDevices() (*kubeletpodresourcesv1alpha1.ListPodResourcesResp return resp, nil } -func getV1NodeDevices() (*kubeletpodresourcesv1.ListPodResourcesResponse, error) { +func getV1NodeDevices(ctx context.Context) (*kubeletpodresourcesv1.ListPodResourcesResponse, error) { endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) if err != nil { return nil, fmt.Errorf("Error getting local endpoint: %v", err) @@ -143,7 +143,7 @@ func getV1NodeDevices() (*kubeletpodresourcesv1.ListPodResourcesResponse, error) return nil, fmt.Errorf("Error getting gRPC client: %v", err) } defer conn.Close() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() resp, err := client.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{}) if err != nil { @@ -153,46 +153,46 @@ func getV1NodeDevices() (*kubeletpodresourcesv1.ListPodResourcesResponse, error) } // Returns the current KubeletConfiguration -func getCurrentKubeletConfig() (*kubeletconfig.KubeletConfiguration, error) { +func getCurrentKubeletConfig(ctx 
context.Context) (*kubeletconfig.KubeletConfiguration, error) { // namespace only relevant if useProxy==true, so we don't bother - return e2ekubelet.GetCurrentKubeletConfig(framework.TestContext.NodeName, "", false) + return e2ekubelet.GetCurrentKubeletConfig(ctx, framework.TestContext.NodeName, "", false) } // Must be called within a Context. Allows the function to modify the KubeletConfiguration during the BeforeEach of the context. // The change is reverted in the AfterEach of the context. // Returns true on success. -func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(initialConfig *kubeletconfig.KubeletConfiguration)) { +func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration)) { var oldCfg *kubeletconfig.KubeletConfiguration - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { var err error - oldCfg, err = getCurrentKubeletConfig() + oldCfg, err = getCurrentKubeletConfig(ctx) framework.ExpectNoError(err) newCfg := oldCfg.DeepCopy() - updateFunction(newCfg) + updateFunction(ctx, newCfg) if apiequality.Semantic.DeepEqual(*newCfg, *oldCfg) { return } - updateKubeletConfig(f, newCfg, true) + updateKubeletConfig(ctx, f, newCfg, true) }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { if oldCfg != nil { // Update the Kubelet configuration. - updateKubeletConfig(f, oldCfg, true) + updateKubeletConfig(ctx, f, oldCfg, true) } }) } -func updateKubeletConfig(f *framework.Framework, kubeletConfig *kubeletconfig.KubeletConfiguration, deleteStateFiles bool) { +func updateKubeletConfig(ctx context.Context, f *framework.Framework, kubeletConfig *kubeletconfig.KubeletConfiguration, deleteStateFiles bool) { // Update the Kubelet configuration. ginkgo.By("Stopping the kubelet") startKubelet := stopKubelet() // wait until the kubelet health check will fail - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, time.Minute, time.Second).Should(gomega.BeFalse()) @@ -208,13 +208,13 @@ func updateKubeletConfig(f *framework.Framework, kubeletConfig *kubeletconfig.Ku startKubelet() // wait until the kubelet health check will succeed - gomega.Eventually(func() bool { + gomega.Eventually(ctx, func() bool { return kubeletHealthCheck(kubeletHealthCheckURL) }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) // Wait for the Kubelet to be ready. - gomega.Eventually(func() bool { - nodes, err := e2enode.TotalReady(f.ClientSet) + gomega.Eventually(ctx, func(ctx context.Context) bool { + nodes, err := e2enode.TotalReady(ctx, f.ClientSet) framework.ExpectNoError(err) return nodes == 1 }, time.Minute, time.Second).Should(gomega.BeTrue()) @@ -226,8 +226,8 @@ func deleteStateFile(stateFileName string) { } // listNamespaceEvents lists the events in the given namespace. 
-func listNamespaceEvents(c clientset.Interface, ns string) error { - ls, err := c.CoreV1().Events(ns).List(context.TODO(), metav1.ListOptions{}) +func listNamespaceEvents(ctx context.Context, c clientset.Interface, ns string) error { + ls, err := c.CoreV1().Events(ns).List(ctx, metav1.ListOptions{}) if err != nil { return err } @@ -237,20 +237,20 @@ func listNamespaceEvents(c clientset.Interface, ns string) error { return nil } -func logPodEvents(f *framework.Framework) { +func logPodEvents(ctx context.Context, f *framework.Framework) { framework.Logf("Summary of pod events during the test:") - err := listNamespaceEvents(f.ClientSet, f.Namespace.Name) + err := listNamespaceEvents(ctx, f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) } -func logNodeEvents(f *framework.Framework) { +func logNodeEvents(ctx context.Context, f *framework.Framework) { framework.Logf("Summary of node events during the test:") - err := listNamespaceEvents(f.ClientSet, "") + err := listNamespaceEvents(ctx, f.ClientSet, "") framework.ExpectNoError(err) } -func getLocalNode(f *framework.Framework) *v1.Node { - nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) +func getLocalNode(ctx context.Context, f *framework.Framework) *v1.Node { + nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) framework.ExpectEqual(len(nodeList.Items), 1, "Unexpected number of node objects for node e2e. Expects only one node.") return &nodeList.Items[0] @@ -260,8 +260,8 @@ func getLocalNode(f *framework.Framework) *v1.Node { // getLocalTestNode is a variant of `getLocalNode` which reports but does not set any requirement about the node readiness state, letting // the caller decide. The check is intentionally done like `getLocalNode` does. // Note `getLocalNode` aborts (as in ginkgo.Expect) the test implicitly if the worker node is not ready. -func getLocalTestNode(f *framework.Framework) (*v1.Node, bool) { - node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) +func getLocalTestNode(ctx context.Context, f *framework.Framework) (*v1.Node, bool) { + node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) ready := e2enode.IsNodeReady(node) schedulable := e2enode.IsNodeSchedulable(node) @@ -272,12 +272,12 @@ func getLocalTestNode(f *framework.Framework) (*v1.Node, bool) { // logKubeletLatencyMetrics logs KubeletLatencyMetrics computed from the Prometheus // metrics exposed on the current node and identified by the metricNames. // The Kubelet subsystem prefix is automatically prepended to these metric names. 
-func logKubeletLatencyMetrics(metricNames ...string) { +func logKubeletLatencyMetrics(ctx context.Context, metricNames ...string) { metricSet := sets.NewString() for _, key := range metricNames { metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key) } - metric, err := e2emetrics.GrabKubeletMetricsWithoutProxy(fmt.Sprintf("%s:%d", framework.TestContext.NodeName, ports.KubeletReadOnlyPort), "/metrics") + metric, err := e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, fmt.Sprintf("%s:%d", framework.TestContext.NodeName, ports.KubeletReadOnlyPort), "/metrics") if err != nil { framework.Logf("Error getting kubelet metrics: %v", err) } else { diff --git a/test/e2e_node/volume_manager_test.go b/test/e2e_node/volume_manager_test.go index ce350afad34..98fc90f9006 100644 --- a/test/e2e_node/volume_manager_test.go +++ b/test/e2e_node/volume_manager_test.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() { ) ginkgo.By("Creating a pod with a memory backed volume that exits success without restart", func() { volumeName = "memory-volume" - memoryBackedPod = e2epod.NewPodClient(f).Create(&v1.Pod{ + memoryBackedPod = e2epod.NewPodClient(f).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -74,7 +74,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() { }, }, }) - err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, memoryBackedPod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, memoryBackedPod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) ginkgo.By("Verifying the memory backed volume was removed from node", func() { @@ -83,7 +83,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() { for i := 0; i < 10; i++ { // need to create a new verification pod on each pass since updates //to the HostPath volume aren't propogated to the pod - pod := e2epod.NewPodClient(f).Create(&v1.Pod{ + pod := e2epod.NewPodClient(f).Create(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -115,9 +115,9 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() { }, }, }) - err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) gp := int64(1) - e2epod.NewPodClient(f).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) + _ = e2epod.NewPodClient(f).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) if err == nil { break } diff --git a/test/integration/framework/perf_utils.go b/test/integration/framework/perf_utils.go index c84d610f030..21585a5204e 100644 --- a/test/integration/framework/perf_utils.go +++ b/test/integration/framework/perf_utils.go @@ -58,7 +58,7 @@ func NewIntegrationTestNodePreparerWithNodeSpec(client clientset.Interface, coun } // PrepareNodes prepares countToStrategy test nodes. 
-func (p *IntegrationTestNodePreparer) PrepareNodes(nextNodeIndex int) error { +func (p *IntegrationTestNodePreparer) PrepareNodes(ctx context.Context, nextNodeIndex int) error { numNodes := 0 for _, v := range p.countToStrategy { numNodes += v.Count @@ -89,7 +89,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes(nextNodeIndex int) error { for i := 0; i < numNodes; i++ { var err error for retry := 0; retry < retries; retry++ { - _, err = p.client.CoreV1().Nodes().Create(context.TODO(), baseNode, metav1.CreateOptions{}) + _, err = p.client.CoreV1().Nodes().Create(ctx, baseNode, metav1.CreateOptions{}) if err == nil { break } @@ -106,7 +106,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes(nextNodeIndex int) error { index := nextNodeIndex for _, v := range p.countToStrategy { for i := 0; i < v.Count; i, index = i+1, index+1 { - if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil { + if err := testutils.DoPrepareNode(ctx, p.client, &nodes.Items[index], v.Strategy); err != nil { klog.Errorf("Aborting node preparation: %v", err) return err } @@ -116,7 +116,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes(nextNodeIndex int) error { } // CleanupNodes deletes existing test nodes. -func (p *IntegrationTestNodePreparer) CleanupNodes() error { +func (p *IntegrationTestNodePreparer) CleanupNodes(ctx context.Context) error { // TODO(#93794): make CleanupNodes only clean up the nodes created by this // IntegrationTestNodePreparer to make this more intuitive. nodes, err := waitListAllNodes(p.client) @@ -125,7 +125,7 @@ func (p *IntegrationTestNodePreparer) CleanupNodes() error { } var errRet error for i := range nodes.Items { - if err := p.client.CoreV1().Nodes().Delete(context.TODO(), nodes.Items[i].Name, metav1.DeleteOptions{}); err != nil { + if err := p.client.CoreV1().Nodes().Delete(ctx, nodes.Items[i].Name, metav1.DeleteOptions{}); err != nil { klog.Errorf("Error while deleting Node: %v", err) errRet = err } diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go index 635eea5718e..0901e870095 100644 --- a/test/integration/scheduler_perf/scheduler_perf_test.go +++ b/test/integration/scheduler_perf/scheduler_perf_test.go @@ -687,11 +687,11 @@ func runWorkload(b *testing.B, tc *testCase, w *workload) []DataItem { if err != nil { b.Fatalf("op %d: %v", opIndex, err) } - if err := nodePreparer.PrepareNodes(nextNodeIndex); err != nil { + if err := nodePreparer.PrepareNodes(ctx, nextNodeIndex); err != nil { b.Fatalf("op %d: %v", opIndex, err) } b.Cleanup(func() { - nodePreparer.CleanupNodes() + _ = nodePreparer.CleanupNodes(ctx) }) nextNodeIndex += concreteOp.Count @@ -742,7 +742,7 @@ func runWorkload(b *testing.B, tc *testCase, w *workload) []DataItem { go collector.run(collectorCtx) } } - if err := createPods(b, namespace, concreteOp, client); err != nil { + if err := createPods(ctx, b, namespace, concreteOp, client); err != nil { b.Fatalf("op %d: %v", opIndex, err) } if concreteOp.SkipWaitToCompletion { @@ -958,7 +958,7 @@ func getNodePreparer(prefix string, cno *createNodesOp, clientset clientset.Inte ), nil } -func createPods(b *testing.B, namespace string, cpo *createPodsOp, clientset clientset.Interface) error { +func createPods(ctx context.Context, b *testing.B, namespace string, cpo *createPodsOp, clientset clientset.Interface) error { strategy, err := getPodStrategy(cpo) if err != nil { return err @@ -967,7 +967,7 @@ func createPods(b *testing.B, namespace string, cpo 
*createPodsOp, clientset cli config := testutils.NewTestPodCreatorConfig() config.AddStrategy(namespace, cpo.Count, strategy) podCreator := testutils.NewTestPodCreator(clientset, config) - return podCreator.CreatePods() + return podCreator.CreatePods(ctx) } // waitUntilPodsScheduledInNamespace blocks until all pods in the given diff --git a/test/utils/crd/crd_util.go b/test/utils/crd/crd_util.go index 018086b1c28..a953385c37b 100644 --- a/test/utils/crd/crd_util.go +++ b/test/utils/crd/crd_util.go @@ -17,6 +17,7 @@ limitations under the License. package crd import ( + "context" "fmt" "k8s.io/utils/pointer" @@ -31,7 +32,7 @@ import ( ) // CleanCrdFn declares the clean up function needed to remove the CRD -type CleanCrdFn func() error +type CleanCrdFn func(ctx context.Context) error // TestCrd holds all the pieces needed to test with the CRD type TestCrd struct { @@ -111,7 +112,7 @@ func CreateMultiVersionTestCRD(f *framework.Framework, group string, opts ...Opt testcrd.APIExtensionClient = apiExtensionClient testcrd.Crd = crd testcrd.DynamicClients = resourceClients - testcrd.CleanUp = func() error { + testcrd.CleanUp = func(ctx context.Context) error { err := fixtures.DeleteV1CustomResourceDefinition(crd, apiExtensionClient) if err != nil { framework.Failf("failed to delete CustomResourceDefinition(%s): %v", name, err) diff --git a/test/utils/runners.go b/test/utils/runners.go index 344e7fc54c7..2c3d3015ab6 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -65,17 +65,17 @@ func removePtr(replicas *int32) int32 { return *replicas } -func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) { +func WaitUntilPodIsScheduled(ctx context.Context, c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) { // Wait until it's scheduled - p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"}) + p, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{ResourceVersion: "0"}) if err == nil && p.Spec.NodeName != "" { return p, nil } pollingPeriod := 200 * time.Millisecond startTime := time.Now() - for startTime.Add(timeout).After(time.Now()) { + for startTime.Add(timeout).After(time.Now()) && ctx.Err() == nil { time.Sleep(pollingPeriod) - p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"}) + p, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{ResourceVersion: "0"}) if err == nil && p.Spec.NodeName != "" { return p, nil } @@ -83,13 +83,13 @@ func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, time return nil, fmt.Errorf("timed out after %v when waiting for pod %v/%v to start", timeout, namespace, name) } -func RunPodAndGetNodeName(c clientset.Interface, pod *v1.Pod, timeout time.Duration) (string, error) { +func RunPodAndGetNodeName(ctx context.Context, c clientset.Interface, pod *v1.Pod, timeout time.Duration) (string, error) { name := pod.Name namespace := pod.Namespace if err := CreatePodWithRetries(c, namespace, pod); err != nil { return "", err } - p, err := WaitUntilPodIsScheduled(c, name, namespace, timeout) + p, err := WaitUntilPodIsScheduled(ctx, c, name, namespace, timeout) if err != nil { return "", err } @@ -173,8 +173,8 @@ type RCConfig struct { LogFunc func(fmt string, args ...interface{}) // If set those functions will be used to gather data from Nodes - in integration tests where no // kubelets are running those variables should be 
nil. - NodeDumpFunc func(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) - ContainerDumpFunc func(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) + NodeDumpFunc func(ctx context.Context, c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) + ContainerDumpFunc func(ctx context.Context, c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) // Names of the secrets and configmaps to mount. SecretNames []string @@ -288,16 +288,16 @@ func Diff(oldPods []*v1.Pod, curPods []*v1.Pod) PodDiff { // and will wait for all pods it spawns to become "Running". // It's the caller's responsibility to clean up externally (i.e. use the // namespace lifecycle for handling Cleanup). -func RunDeployment(config DeploymentConfig) error { +func RunDeployment(ctx context.Context, config DeploymentConfig) error { err := config.create() if err != nil { return err } - return config.start() + return config.start(ctx) } -func (config *DeploymentConfig) Run() error { - return RunDeployment(*config) +func (config *DeploymentConfig) Run(ctx context.Context) error { + return RunDeployment(ctx, *config) } func (config *DeploymentConfig) GetKind() schema.GroupKind { @@ -374,16 +374,16 @@ func (config *DeploymentConfig) create() error { // and waits until all the pods it launches to reach the "Running" state. // It's the caller's responsibility to clean up externally (i.e. use the // namespace lifecycle for handling Cleanup). -func RunReplicaSet(config ReplicaSetConfig) error { +func RunReplicaSet(ctx context.Context, config ReplicaSetConfig) error { err := config.create() if err != nil { return err } - return config.start() + return config.start(ctx) } -func (config *ReplicaSetConfig) Run() error { - return RunReplicaSet(*config) +func (config *ReplicaSetConfig) Run(ctx context.Context) error { + return RunReplicaSet(ctx, *config) } func (config *ReplicaSetConfig) GetKind() schema.GroupKind { @@ -456,16 +456,16 @@ func (config *ReplicaSetConfig) create() error { // and will wait for all pods it spawns to become "Running". // It's the caller's responsibility to clean up externally (i.e. use the // namespace lifecycle for handling Cleanup). -func RunJob(config JobConfig) error { +func RunJob(ctx context.Context, config JobConfig) error { err := config.create() if err != nil { return err } - return config.start() + return config.start(ctx) } -func (config *JobConfig) Run() error { - return RunJob(*config) +func (config *JobConfig) Run(ctx context.Context) error { + return RunJob(ctx, *config) } func (config *JobConfig) GetKind() schema.GroupKind { @@ -530,16 +530,16 @@ func (config *JobConfig) create() error { // and will wait for all pods it spawns to become "Running". // It's the caller's responsibility to clean up externally (i.e. use the // namespace lifecycle for handling Cleanup). 
-func RunRC(config RCConfig) error { +func RunRC(ctx context.Context, config RCConfig) error { err := config.create() if err != nil { return err } - return config.start() + return config.start(ctx) } -func (config *RCConfig) Run() error { - return RunRC(*config) +func (config *RCConfig) Run(ctx context.Context) error { + return RunRC(ctx, *config) } func (config *RCConfig) GetName() string { @@ -776,7 +776,7 @@ func ComputeRCStartupStatus(pods []*v1.Pod, expected int) RCStartupStatus { return startupStatus } -func (config *RCConfig) start() error { +func (config *RCConfig) start(ctx context.Context) error { // Don't force tests to fail if they don't care about containers restarting. var maxContainerFailures int if config.MaxContainerFailures == nil { @@ -824,11 +824,11 @@ func (config *RCConfig) start() error { if startupStatus.FailedContainers > maxContainerFailures { if config.NodeDumpFunc != nil { - config.NodeDumpFunc(config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog) + config.NodeDumpFunc(ctx, config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog) } if config.ContainerDumpFunc != nil { // Get the logs from the failed containers to help diagnose what caused them to fail - config.ContainerDumpFunc(config.Client, config.Namespace, config.RCConfigLog) + config.ContainerDumpFunc(ctx, config.Client, config.Namespace, config.RCConfigLog) } return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures) } @@ -858,7 +858,7 @@ func (config *RCConfig) start() error { if oldRunning != config.Replicas { // List only pods from a given replication controller. options := metav1.ListOptions{LabelSelector: label.String()} - if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(context.TODO(), options); err == nil { + if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(ctx, options); err == nil { for _, pod := range pods.Items { config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp) } @@ -946,8 +946,8 @@ type CountToStrategy struct { } type TestNodePreparer interface { - PrepareNodes(nextNodeIndex int) error - CleanupNodes() error + PrepareNodes(ctx context.Context, nextNodeIndex int) error + CleanupNodes(ctx context.Context) error } type PrepareNodeStrategy interface { @@ -955,12 +955,12 @@ type PrepareNodeStrategy interface { PreparePatch(node *v1.Node) []byte // Create or modify any objects that depend on the node before the test starts. // Caller will re-try when http.StatusConflict error is returned. - PrepareDependentObjects(node *v1.Node, client clientset.Interface) error + PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error // Clean up any node modifications after the test finishes. - CleanupNode(node *v1.Node) *v1.Node + CleanupNode(ctx context.Context, node *v1.Node) *v1.Node // Clean up any objects that depend on the node after the test finishes. // Caller will re-try when http.StatusConflict error is returned. 
- CleanupDependentObjects(nodeName string, client clientset.Interface) error + CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error } type TrivialNodePrepareStrategy struct{} @@ -971,16 +971,16 @@ func (*TrivialNodePrepareStrategy) PreparePatch(*v1.Node) []byte { return []byte{} } -func (*TrivialNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node { +func (*TrivialNodePrepareStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node { nodeCopy := *node return &nodeCopy } -func (*TrivialNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { +func (*TrivialNodePrepareStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error { return nil } -func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { +func (*TrivialNodePrepareStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error { return nil } @@ -1009,7 +1009,7 @@ func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte { return []byte(patch) } -func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node { +func (s *LabelNodePrepareStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node { nodeCopy := node.DeepCopy() if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 { delete(nodeCopy.Labels, s.LabelKey) @@ -1017,11 +1017,11 @@ func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node { return nodeCopy } -func (*LabelNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { +func (*LabelNodePrepareStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error { return nil } -func (*LabelNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { +func (*LabelNodePrepareStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error { return nil } @@ -1069,7 +1069,7 @@ func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte { return patch } -func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node { +func (s *NodeAllocatableStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node { nodeCopy := node.DeepCopy() for name := range s.NodeAllocatable { delete(nodeCopy.Status.Allocatable, name) @@ -1077,7 +1077,7 @@ func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node { return nodeCopy } -func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientset.Interface) error { +func (s *NodeAllocatableStrategy) createCSINode(ctx context.Context, nodeName string, client clientset.Interface) error { csiNode := &storagev1.CSINode{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, @@ -1099,7 +1099,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d) } - _, err := client.StorageV1().CSINodes().Create(context.TODO(), csiNode, metav1.CreateOptions{}) + _, err := client.StorageV1().CSINodes().Create(ctx, csiNode, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { // Something created CSINode instance after we checked it did not exist. 
// Make the caller to re-try PrepareDependentObjects by returning Conflict error @@ -1108,7 +1108,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse return err } -func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1.CSINode, client clientset.Interface) error { +func (s *NodeAllocatableStrategy) updateCSINode(ctx context.Context, csiNode *storagev1.CSINode, client clientset.Interface) error { for driverName, allocatable := range s.CsiNodeAllocatable { found := false for i, driver := range csiNode.Spec.Drivers { @@ -1129,23 +1129,23 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1.CSINode, clie } csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",") - _, err := client.StorageV1().CSINodes().Update(context.TODO(), csiNode, metav1.UpdateOptions{}) + _, err := client.StorageV1().CSINodes().Update(ctx, csiNode, metav1.UpdateOptions{}) return err } -func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { - csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) +func (s *NodeAllocatableStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error { + csiNode, err := client.StorageV1().CSINodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { - return s.createCSINode(node.Name, client) + return s.createCSINode(ctx, node.Name, client) } return err } - return s.updateCSINode(csiNode, client) + return s.updateCSINode(ctx, csiNode, client) } -func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { - csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func (s *NodeAllocatableStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error { + csiNode, err := client.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return nil @@ -1160,7 +1160,7 @@ func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, clien } } } - return s.updateCSINode(csiNode, client) + return s.updateCSINode(ctx, csiNode, client) } // UniqueNodeLabelStrategy sets a unique label for each node. 
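The PrepareDependentObjects/CleanupDependentObjects helpers above and the DoPrepareNode/DoCleanupNode helpers below share a retry-on-conflict convention. The sketch below (not part of the patch) shows that loop with the request context threaded through instead of context.TODO(); patchNodeWithRetries and the retries constant are illustrative stand-ins, not the real helpers.

package example

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

const retries = 5 // illustrative; the real helpers use their own retry count

func patchNodeWithRetries(ctx context.Context, client clientset.Interface, nodeName string, patch []byte) error {
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		// Use ctx (not context.TODO()) so an aborted test cancels the in-flight request.
		if _, err = client.CoreV1().Nodes().Patch(ctx, nodeName, types.MergePatchType, patch, metav1.PatchOptions{}); err == nil {
			return nil
		}
		if !apierrors.IsConflict(err) {
			return err
		}
	}
	return err
}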
@@ -1182,7 +1182,7 @@ func (s *UniqueNodeLabelStrategy) PreparePatch(*v1.Node) []byte {
 	return []byte(patch)
 }
 
-func (s *UniqueNodeLabelStrategy) CleanupNode(node *v1.Node) *v1.Node {
+func (s *UniqueNodeLabelStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node {
 	nodeCopy := node.DeepCopy()
 	if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 {
 		delete(nodeCopy.Labels, s.LabelKey)
@@ -1190,22 +1190,22 @@ func (s *UniqueNodeLabelStrategy) CleanupNode(node *v1.Node) *v1.Node {
 	return nodeCopy
 }
 
-func (*UniqueNodeLabelStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
+func (*UniqueNodeLabelStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error {
 	return nil
 }
 
-func (*UniqueNodeLabelStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
+func (*UniqueNodeLabelStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error {
 	return nil
 }
 
-func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
+func DoPrepareNode(ctx context.Context, client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
 	var err error
 	patch := strategy.PreparePatch(node)
 	if len(patch) == 0 {
 		return nil
 	}
 	for attempt := 0; attempt < retries; attempt++ {
-		if _, err = client.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil {
+		if _, err = client.CoreV1().Nodes().Patch(ctx, node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil {
 			break
 		}
 		if !apierrors.IsConflict(err) {
@@ -1218,7 +1218,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
 	}
 
 	for attempt := 0; attempt < retries; attempt++ {
-		if err = strategy.PrepareDependentObjects(node, client); err == nil {
+		if err = strategy.PrepareDependentObjects(ctx, node, client); err == nil {
 			break
 		}
 		if !apierrors.IsConflict(err) {
@@ -1232,19 +1232,19 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
 	return nil
 }
 
-func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
+func DoCleanupNode(ctx context.Context, client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
 	var err error
 	for attempt := 0; attempt < retries; attempt++ {
 		var node *v1.Node
-		node, err = client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+		node, err = client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		if err != nil {
 			return fmt.Errorf("skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
 		}
-		updatedNode := strategy.CleanupNode(node)
+		updatedNode := strategy.CleanupNode(ctx, node)
 		if apiequality.Semantic.DeepEqual(node, updatedNode) {
 			return nil
 		}
-		if _, err = client.CoreV1().Nodes().Update(context.TODO(), updatedNode, metav1.UpdateOptions{}); err == nil {
+		if _, err = client.CoreV1().Nodes().Update(ctx, updatedNode, metav1.UpdateOptions{}); err == nil {
 			break
 		}
 		if !apierrors.IsConflict(err) {
@@ -1257,7 +1257,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
 	}
 
 	for attempt := 0; attempt < retries; attempt++ {
-		err = strategy.CleanupDependentObjects(nodeName, client)
+		err = strategy.CleanupDependentObjects(ctx, nodeName, client)
 		if err == nil {
 			break
 		}
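
A sketch of the intended calling convention for DoPrepareNode/DoCleanupNode after this change, assuming the test/utils package alias testutils and Ginkgo v2, whose DeferCleanup can hand a fresh context to the cleanup function; prepareNode is a hypothetical helper, not part of this patch:

    package example

    import (
        "context"

        "github.com/onsi/ginkgo/v2"
        v1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes"
        testutils "k8s.io/kubernetes/test/utils"
    )

    // prepareNode passes the spec context into DoPrepareNode and registers a
    // context-aware cleanup, so both phases stop promptly on interruption.
    func prepareNode(ctx context.Context, client kubernetes.Interface, node *v1.Node) error {
        strategy := &testutils.TrivialNodePrepareStrategy{}
        if err := testutils.DoPrepareNode(ctx, client, node, strategy); err != nil {
            return err
        }
        ginkgo.DeferCleanup(func(ctx context.Context) error {
            return testutils.DoCleanupNode(ctx, client, node.Name, strategy)
        })
        return nil
    }
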
@@ -1272,7 +1272,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
 	return nil
 }
 
-type TestPodCreateStrategy func(client clientset.Interface, namespace string, podCount int) error
+type TestPodCreateStrategy func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error
 
 type CountToPodStrategy struct {
 	Count int
@@ -1304,10 +1304,10 @@ func NewTestPodCreator(client clientset.Interface, config *TestPodCreatorConfig)
 	}
 }
 
-func (c *TestPodCreator) CreatePods() error {
+func (c *TestPodCreator) CreatePods(ctx context.Context) error {
 	for ns, v := range *(c.Config) {
 		for _, countToStrategy := range v {
-			if err := countToStrategy.Strategy(c.Client, ns, countToStrategy.Count); err != nil {
+			if err := countToStrategy.Strategy(ctx, c.Client, ns, countToStrategy.Count); err != nil {
 				return err
 			}
 		}
@@ -1342,7 +1342,7 @@ func makeCreatePod(client clientset.Interface, namespace string, podTemplate *v1
 	return nil
 }
 
-func CreatePod(client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error {
+func CreatePod(ctx context.Context, client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error {
 	var createError error
 	lock := sync.Mutex{}
 	createPodFunc := func(i int) {
@@ -1354,14 +1354,14 @@ func CreatePod(client clientset.Interface, namespace string, podCount int, podTe
 	}
 
 	if podCount < 30 {
-		workqueue.ParallelizeUntil(context.TODO(), podCount, podCount, createPodFunc)
+		workqueue.ParallelizeUntil(ctx, podCount, podCount, createPodFunc)
 	} else {
-		workqueue.ParallelizeUntil(context.TODO(), 30, podCount, createPodFunc)
+		workqueue.ParallelizeUntil(ctx, 30, podCount, createPodFunc)
 	}
 	return createError
 }
 
-func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int, bindVolume bool) error {
+func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int, bindVolume bool) error {
 	var createError error
 	lock := sync.Mutex{}
 	createPodFunc := func(i int) {
@@ -1400,7 +1400,7 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string,
 				return
 			}
 			// We need to update statuses separately, as creating pv/pvc resets status to the default one.
-			if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil {
+			if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(ctx, pvc, metav1.UpdateOptions{}); err != nil {
				lock.Lock()
 				defer lock.Unlock()
 				createError = fmt.Errorf("error updating PVC status: %s", err)
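
CreatePod fans the per-pod work out through workqueue.ParallelizeUntil, so handing it the test context means no new work pieces are started once the spec is cancelled. A self-contained sketch of that behaviour, with arbitrary worker and piece counts:

    package main

    import (
        "context"
        "fmt"
        "time"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        // Cancel after 50ms to stand in for an aborted spec.
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()

        // 3 workers, 100 pieces: once ctx is done, ParallelizeUntil stops
        // handing out new pieces, so only a fraction of them ever run.
        workqueue.ParallelizeUntil(ctx, 3, 100, func(i int) {
            time.Sleep(10 * time.Millisecond)
            fmt.Println("processed piece", i)
        })
    }
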
@@ -1414,7 +1414,7 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string,
 				return
 			}
 			// We need to update statuses separately, as creating pv/pvc resets status to the default one.
-			if _, err := client.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), pv, metav1.UpdateOptions{}); err != nil {
+			if _, err := client.CoreV1().PersistentVolumes().UpdateStatus(ctx, pv, metav1.UpdateOptions{}); err != nil {
 				lock.Lock()
 				defer lock.Unlock()
 				createError = fmt.Errorf("error updating PV status: %s", err)
@@ -1442,9 +1442,9 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string,
 	}
 
 	if count < 30 {
-		workqueue.ParallelizeUntil(context.TODO(), count, count, createPodFunc)
+		workqueue.ParallelizeUntil(ctx, count, count, createPodFunc)
 	} else {
-		workqueue.ParallelizeUntil(context.TODO(), 30, count, createPodFunc)
+		workqueue.ParallelizeUntil(ctx, 30, count, createPodFunc)
 	}
 	return createError
 }
@@ -1472,8 +1472,8 @@ func createController(client clientset.Interface, controllerName, namespace stri
 }
 
 func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
-	return func(client clientset.Interface, namespace string, podCount int) error {
-		return CreatePod(client, namespace, podCount, podTemplate)
+	return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
+		return CreatePod(ctx, client, namespace, podCount, podTemplate)
 	}
 }
 
@@ -1481,8 +1481,8 @@ func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
 type volumeFactory func(uniqueID int) *v1.PersistentVolume
 
 func NewCreatePodWithPersistentVolumeStrategy(claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
-	return func(client clientset.Interface, namespace string, podCount int) error {
-		return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factory, podTemplate, podCount, true /* bindVolume */)
+	return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
+		return CreatePodWithPersistentVolume(ctx, client, namespace, claimTemplate, factory, podTemplate, podCount, true /* bindVolume */)
 	}
 }
 
@@ -1501,7 +1501,7 @@ func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeC
 }
 
 func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
-	return func(client clientset.Interface, namespace string, podCount int) error {
+	return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
 		volumeBindingMode := storagev1.VolumeBindingWaitForFirstConsumer
 		storageClass := &storagev1.StorageClass{
 			ObjectMeta: metav1.ObjectMeta{
@@ -1522,7 +1522,7 @@ func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFac
 			return pv
 		}
 
-		return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factoryWithStorageClass, podTemplate, podCount, false /* bindVolume */)
+		return CreatePodWithPersistentVolume(ctx, client, namespace, claimTemplate, factoryWithStorageClass, podTemplate, podCount, false /* bindVolume */)
 	}
 }
 
@@ -1537,7 +1537,7 @@ func NewSimpleCreatePodStrategy() TestPodCreateStrategy {
 }
 
 func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCreateStrategy {
-	return func(client clientset.Interface, namespace string, podCount int) error {
+	return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
 		basePod := &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				GenerateName: controllerName + "-pod-",
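
All the strategy constructors above now return context-aware closures. A hypothetical strategy written against the same TestPodCreateStrategy signature, which bails out early if the context is already cancelled before delegating to CreatePod (the helper name is illustrative only):

    package example

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        clientset "k8s.io/client-go/kubernetes"
        testutils "k8s.io/kubernetes/test/utils"
    )

    // newEarlyExitPodStrategy wraps CreatePod and returns immediately if the
    // spec context has already been cancelled.
    func newEarlyExitPodStrategy(podTemplate *v1.Pod) testutils.TestPodCreateStrategy {
        return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
            if err := ctx.Err(); err != nil {
                return err
            }
            return testutils.CreatePod(ctx, client, namespace, podCount, podTemplate)
        }
    }
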
@@ -1548,7 +1548,7 @@ func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCrea
 		if err := createController(client, controllerName, namespace, podCount, basePod); err != nil {
 			return err
 		}
-		return CreatePod(client, namespace, podCount, basePod)
+		return CreatePod(ctx, client, namespace, podCount, basePod)
 	}
 }
 
@@ -1739,7 +1739,7 @@ type DaemonConfig struct {
 	Timeout time.Duration
 }
 
-func (config *DaemonConfig) Run() error {
+func (config *DaemonConfig) Run(ctx context.Context) error {
 	if config.Image == "" {
 		config.Image = "registry.k8s.io/pause:3.9"
 	}
@@ -1775,7 +1775,7 @@ func (config *DaemonConfig) Run() error {
 	var err error
 	for i := 0; i < retries; i++ {
 		// Wait for all daemons to be running
-		nodes, err = config.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
+		nodes, err = config.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
 		if err == nil {
 			break
 		} else if i+1 == retries {